entry_point (string, 1–65 chars) | original_triton_code (string, 4.5k–619k chars) | python_code (string, 208–60.9k chars) | triton_code (string, 1.15k–275k chars) | repo_name (string, 7–115 chars) | module_name (string, 1–65 chars) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (list of strings, 1–6 entries) | stars (int64, 0–19.8k) | sha (string, 40 chars) | repo_link (string, 72–180 chars)
---|---|---|---|---|---|---|---|---|---|---|---
MaxPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/dr/cdrzsiegawudzaa2z2yiqd6a6fofxzce6qjjo4enhayl42oawcno.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
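# Reading the generated kernel above (a sketch of the indexing): the input is
# (4, 4, 4, 4) with strides (64, 16, 4, 1), so each of the xnumel = 16 output
# elements corresponds to one batch*channel slice; the kernel loads that
# slice's 16 contiguous spatial values at offsets 16*x0 .. 16*x0 + 15 and
# folds them with 15 pairwise triton_helpers.maximum calls into a single max.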
| import torch
import torch.nn as nn
class MaxPool(nn.Module):
def __init__(self, kernel_size, stride):
super(MaxPool, self).__init__()
self.pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride)
def forward(self, x):
x = self.pool(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4, 'stride': 1}]
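# Eager-mode sanity check (a minimal sketch; `_check_maxpool` is a hypothetical
# helper, not part of the original module): with kernel_size=4 and stride=1 on
# a 4x4 spatial input, the pool emits exactly one window, i.e. the spatial max.
def _check_maxpool():
    x = torch.rand(4, 4, 4, 4)
    y = MaxPool(kernel_size=4, stride=1)(x)
    assert y.shape == (4, 4, 1, 1)
    assert torch.equal(y, x.amax(dim=(2, 3), keepdim=True))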
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + x0, tmp30, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MaxPoolNew(nn.Module):
def __init__(self, kernel_size, stride):
super(MaxPoolNew, self).__init__()
self.pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
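# Usage sketch for the compiled wrapper (assumes a CUDA device, since call()
# allocates CUDA buffers and launches the Triton kernel directly):
#   m = MaxPoolNew(kernel_size=4, stride=1)
#   y = m(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 4, 1, 1)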
| Hiroaki-Ozaki/modelib-classification | MaxPool | false | 17,382 | ["WTFPL"] | 10 | 11077704cc0bc9a42fc4b94da60b57d31ff0f65c | https://github.com/Hiroaki-Ozaki/modelib-classification/tree/11077704cc0bc9a42fc4b94da60b57d31ff0f65c |
MeanVoxelFeatureExtractor | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/7g/c7goq4oyslacjqrxulmuyjsilcpec5xydttnbzdkfzow6lghvs4x.py
# Topologically Sorted Source Nodes: [sum_1, points_mean], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# points_mean => div
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %view), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (64, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, points_mean], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sum_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((64, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
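# Indexing note for the kernel above (a sketch): xnumel = 256 covers the
# (64, 4) output; x1 = xindex // 4 selects the voxel and x0 = xindex % 4 the
# feature channel. The four loads at offsets 0/4/8/12 walk the voxel's 4
# points (stride 4 along dim=1), so the adds realize sum(dim=1) and the final
# division by in_ptr1[x1] applies the per-voxel point count.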
| import torch
import torch.nn as nn
class VoxelFeatureExtractor(nn.Module):
def __init__(self, **kwargs):
super().__init__()
def get_output_feature_dim(self):
raise NotImplementedError
def forward(self, **kwargs):
raise NotImplementedError
class MeanVoxelFeatureExtractor(VoxelFeatureExtractor):
def __init__(self, **kwargs):
super().__init__()
def get_output_feature_dim(self):
        return cfg.DATA_CONFIG.NUM_POINT_FEATURES['use']  # NOTE: `cfg` is the repo's global config object; it is not defined in this snippet
def forward(self, features, num_voxels, **kwargs):
"""
:param features: (N, max_points_of_each_voxel, 3 + C)
:param num_voxels: (N)
:param kwargs:
:return:
"""
points_mean = features[:, :, :].sum(dim=1, keepdim=False
) / num_voxels.type_as(features).view(-1, 1)
return points_mean.contiguous()
def get_inputs():
return [torch.rand([64, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
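# Minimal eager reference for what the fused kernel computes (a sketch;
# `_mean_vfe_reference` is a hypothetical helper added for illustration):
def _mean_vfe_reference(features, num_voxels):
    # features: (N, max_points, C); num_voxels: any tensor with N elements
    return features.sum(dim=1) / num_voxels.type_as(features).view(-1, 1)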
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (64, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class VoxelFeatureExtractor(nn.Module):
def __init__(self, **kwargs):
super().__init__()
def get_output_feature_dim(self):
raise NotImplementedError
def forward(self, **kwargs):
raise NotImplementedError
class MeanVoxelFeatureExtractorNew(VoxelFeatureExtractor):
def __init__(self, **kwargs):
super().__init__()
def get_output_feature_dim(self):
        return cfg.DATA_CONFIG.NUM_POINT_FEATURES['use']  # NOTE: `cfg` is the repo's global config object; it is not defined in this snippet
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
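# Usage sketch (assumes CUDA; shapes mirror get_inputs above):
#   vfe = MeanVoxelFeatureExtractorNew()
#   out = vfe(torch.rand(64, 4, 4, device='cuda'),
#             torch.rand(4, 4, 4, device='cuda'))  # out: (64, 4) voxel means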
| Hub-Tian/CADNet | MeanVoxelFeatureExtractor | false | 17,383 | ["Apache-2.0"] | 7 | 37d2be6121bb184d8ded92fa468cb6490a15caea | https://github.com/Hub-Tian/CADNet/tree/37d2be6121bb184d8ded92fa468cb6490a15caea |
SAM | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/bf/cbfr56ilqekb7m45ts26eymcmsuqg3jgubvm2hhszluf2sikbii6.py
# Topologically Sorted Source Nodes: [conv2d_1, img], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# img => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_6), kwargs = {})
triton_poi_fused_add_convolution_0 = async_compile.triton('triton_poi_fused_add_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wr/cwrvhmabcr6zpxa55ag3ut4fn3g75ji7lx7jfyzuafhqxmadqgrl.py
# Topologically Sorted Source Nodes: [x1, conv2d_2, x2, x1_1, x1_2], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x1 => convolution
# x1_1 => mul
# x1_2 => add_1
# x2 => sigmoid
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %primals_7, %primals_8, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_convolution_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_convolution_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_sigmoid_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x3), None)
tmp4 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x3), None)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(in_out_ptr1 + (x3), tmp5, None)
tl.store(out_ptr0 + (x3), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (3, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (3, ), (1, ))
assert_size_stride(primals_6, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_7, (4, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, img], Original ATen: [aten.convolution, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_convolution_0.run(buf3, primals_5, primals_6, 49152, grid=grid(49152), stream=stream0)
del primals_5
del primals_6
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1, conv2d_2, x2, x1_1, x1_2], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_sigmoid_1.run(buf1, buf5, primals_2, primals_8, primals_3, buf6, 65536, grid=grid(65536), stream=stream0)
del primals_2
del primals_8
return (buf6, buf3, primals_1, primals_3, primals_4, primals_7, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((3, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
return nn.Conv2d(in_channels, out_channels, kernel_size, padding=
kernel_size // 2, bias=bias, stride=stride)
class SAM(nn.Module):
def __init__(self, n_feat, kernel_size=3, bias=True):
super(SAM, self).__init__()
self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias)
self.conv2 = conv(n_feat, 3, kernel_size, bias=bias)
self.conv3 = conv(3, n_feat, kernel_size, bias=bias)
def forward(self, x, x_img):
x1 = self.conv1(x)
img = self.conv2(x) + x_img
x2 = torch.sigmoid(self.conv3(img))
x1 = x1 * x2
x1 = x1 + x
return x1, img
def get_inputs():
return [torch.rand([4, 4, 64, 64]), torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'n_feat': 4}]
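# Shape sanity check for the module above (a minimal CPU sketch; `_check_sam`
# is a hypothetical helper): conv2 produces the 3-channel image residual,
# conv3 turns that image into an n_feat-channel sigmoid gate, and the gated
# conv1 features ride a skip connection back onto x.
def _check_sam():
    sam = SAM(n_feat=4)
    x1, img = sam(torch.rand(4, 4, 64, 64), torch.rand(4, 3, 64, 64))
    assert x1.shape == (4, 4, 64, 64) and img.shape == (4, 3, 64, 64)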
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_mul_sigmoid_1(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, None)
tmp4 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x3, None)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(in_out_ptr1 + x3, tmp5, None)
tl.store(out_ptr0 + x3, tmp9, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (3, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (3,), (1,))
assert_size_stride(primals_6, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_7, (4, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf3 = buf2
del buf2
get_raw_stream(0)
triton_poi_fused_add_convolution_0[grid(49152)](buf3, primals_5,
primals_6, 49152, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
del primals_6
buf4 = extern_kernels.convolution(buf3, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0
del buf0
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_convolution_mul_sigmoid_1[grid(65536)](buf1,
buf5, primals_2, primals_8, primals_3, buf6, 65536, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
del primals_8
return (buf6, buf3, primals_1, primals_3, primals_4, primals_7, buf1,
buf3, buf5)
def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
return nn.Conv2d(in_channels, out_channels, kernel_size, padding=
kernel_size // 2, bias=bias, stride=stride)
class SAMNew(nn.Module):
def __init__(self, n_feat, kernel_size=3, bias=True):
super(SAMNew, self).__init__()
self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias)
self.conv2 = conv(n_feat, 3, kernel_size, bias=bias)
self.conv3 = conv(3, n_feat, kernel_size, bias=bias)
def forward(self, input_0, input_1):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_7 = self.conv3.weight
primals_8 = self.conv3.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
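# Usage sketch (assumes CUDA; the weights must be moved to the GPU because
# call() feeds them straight into CUDA kernels):
#   sam = SAMNew(n_feat=4).cuda()
#   x1, img = sam(torch.rand(4, 4, 64, 64, device='cuda'),
#                 torch.rand(4, 3, 64, 64, device='cuda'))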
| HolyWu/vs-hinet | SAM | false | 17,384 | ["MIT"] | 4 | b1083ab169d082696d4bf40281922ee52c762714 | https://github.com/HolyWu/vs-hinet/tree/b1083ab169d082696d4bf40281922ee52c762714 |
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/gw/cgwk5ph5ltngusgxtmjwtbqptx2dthbkyuj2tip4xelfenun273j.py
# Topologically Sorted Source Nodes: [sub, add, output, mul, output_1], Original ATen: [aten.sub, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul => mul
# output => div
# output_1 => add_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %unsqueeze), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_1, 1e-10), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %expand), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_1), kwargs = {})
triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-10
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, add, output, mul, output_1], Original ATen: [aten.sub, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Layer normalization class. Normalization is done on the last dimension
Args:
input_size: size of input sample
Inputs:
a Tensor with shape (batch, length, input_size) or (batch, input_size)
Outputs:
a Tensor with shape (batch, length, input_size) or (batch, input_size)
"""
def __init__(self, input_size, eps=1e-10):
super(LayerNorm, self).__init__()
self.eps = eps
self.a = nn.Parameter(torch.ones(input_size))
self.b = nn.Parameter(torch.zeros(input_size))
def forward(self, input):
mu = input.mean(-1).unsqueeze(-1)
sigma = input.std(-1).unsqueeze(-1)
output = (input - mu) / (sigma + self.eps)
output = output * self.a.expand_as(output) + self.b.expand_as(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
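# Note: Tensor.std defaults to the unbiased estimator (ddof=1), which is why
# the generated kernel divides the summed squared deviations by 3.0 for the
# 4-element last dimension. A minimal eager reference (hypothetical helper):
def _layernorm_reference(x, a, b, eps=1e-10):
    mu = x.mean(-1, keepdim=True)
    sigma = x.std(-1, keepdim=True)  # unbiased: divides by (n - 1)
    return (x - mu) / (sigma + eps) * a + b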
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-10
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
"""Layer normalization class. Normalization is done on the last dimension
Args:
input_size: size of input sample
Inputs:
a Tensor with shape (batch, length, input_size) or (batch, input_size)
Outputs:
a Tensor with shape (batch, length, input_size) or (batch, input_size)
"""
def __init__(self, input_size, eps=1e-10):
super(LayerNormNew, self).__init__()
self.eps = eps
self.a = nn.Parameter(torch.ones(input_size))
self.b = nn.Parameter(torch.zeros(input_size))
def forward(self, input_0):
primals_2 = self.a
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
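# Usage sketch (assumes CUDA; the a/b parameters must live on the GPU):
#   ln = LayerNormNew(input_size=4).cuda()
#   y = ln(torch.rand(4, 4, 4, 4, device='cuda'))  # matches eager LayerNorm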
| Hritikbansal/RNNs_SVA_OOD | LayerNorm | false | 17,385 | ["MIT"] | 4 | a1c73955342d9d35c49da5fcb7b315e37b0f75d1 | https://github.com/Hritikbansal/RNNs_SVA_OOD/tree/a1c73955342d9d35c49da5fcb7b315e37b0f75d1 |
ArcMarginProduct | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/wz/cwzcah4g5b3ejt55evsx6ffyi2xbfvkkf2onb5k7hy6quvpcd2du.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/g7/cg74b7dk6b63qqgiwzjxhp4x5hqpdqk3ofb5vbtk76tjjunp37hd.py
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize_1 => div_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/bd/cbdqsidpio3k3d4u4qyxa5zvfuxnfecq3rxmkzjw5ke43fdvkatu.py
# Topologically Sorted Source Nodes: [pow_1, sub, clamp, sine, mul, mul_1, phi, gt, sub_2, phi_1, mul_2, sub_3, mul_3, output, output_1], Original ATen: [aten.pow, aten.rsub, aten.clamp, aten.sqrt, aten.mul, aten.sub, aten.gt, aten.where, aten.add]
# Source node to ATen node mapping:
# clamp => clamp_max, clamp_min_2
# gt => gt
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# output => add
# output_1 => mul_4
# phi => sub_1
# phi_1 => where
# pow_1 => pow_5
# sine => sqrt
# sub => sub
# sub_2 => sub_2
# sub_3 => sub_3
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %pow_5), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_max,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.8775825618903728), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, 0.479425538604203), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, -0.8775825618903726), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, 0.23971276930210156), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %sub_1, %sub_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %where), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %primals_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %view_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 30.0), kwargs = {})
triton_poi_fused_add_clamp_gt_mul_pow_rsub_sqrt_sub_where_2 = async_compile.triton('triton_poi_fused_add_clamp_gt_mul_pow_rsub_sqrt_sub_where_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_gt_mul_pow_rsub_sqrt_sub_where_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_gt_mul_pow_rsub_sqrt_sub_where_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = -0.8775825618903726
tmp3 = tmp1 > tmp2
tmp4 = 0.8775825618903728
tmp5 = tmp1 * tmp4
tmp6 = tmp1 * tmp1
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = triton_helpers.minimum(tmp10, tmp7)
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 0.479425538604203
tmp14 = tmp12 * tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.23971276930210156
tmp17 = tmp1 - tmp16
tmp18 = tl.where(tmp3, tmp15, tmp17)
tmp19 = tmp0 * tmp18
tmp20 = tmp7 - tmp0
tmp21 = tmp20 * tmp1
tmp22 = tmp19 + tmp21
tmp23 = 30.0
tmp24 = tmp22 * tmp23
tl.store(out_ptr0 + (x0), tmp24, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cosine], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sub, clamp, sine, mul, mul_1, phi, gt, sub_2, phi_1, mul_2, sub_3, mul_3, output, output_1], Original ATen: [aten.pow, aten.rsub, aten.clamp, aten.sqrt, aten.mul, aten.sub, aten.gt, aten.where, aten.add]
triton_poi_fused_add_clamp_gt_mul_pow_rsub_sqrt_sub_where_2.run(primals_3, buf2, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, primals_2, primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
from torch.nn import Parameter
class ArcMarginProduct(nn.Module):
"""Implement of large margin arc distance: :
Args:
in_features: size of each input sample
out_features: size of each output sample
s: norm of input feature
m: margin
cos(theta + m)
"""
def __init__(self, in_features, out_features, s=30.0, m=0.5,
easy_margin=False):
super(ArcMarginProduct, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.weight = Parameter(data=torch.randn(size=(out_features,
in_features), dtype=torch.float32, requires_grad=True),
requires_grad=True)
nn.init.xavier_uniform_(self.weight)
self.easy_margin = easy_margin
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
def forward(self, inputs, targets):
cosine = F.linear(F.normalize(inputs), F.normalize(self.weight))
sine = torch.sqrt((1.0 - torch.pow(cosine, 2)).clamp(0, 1))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where(cosine > self.th, phi, cosine - self.mm)
output = targets * phi + (1.0 - targets) * cosine
output *= self.s
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
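# A minimal standalone sketch (not part of the captured module; the shapes and
# tolerance below are illustrative assumptions) verifying that the margin
# arithmetic above matches the closed form cos(theta + m):
def _check_margin_identity(m=0.5):
    cosine = torch.linspace(-0.99, 0.99, 9, dtype=torch.float64)
    sine = torch.sqrt((1.0 - cosine.pow(2)).clamp(0, 1))
    phi = cosine * math.cos(m) - sine * math.sin(m)  # fused-kernel formula
    ref = torch.cos(torch.acos(cosine) + m)          # direct cos(theta + m)
    assert torch.allclose(phi, ref, atol=1e-9)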
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.nn.parallel
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
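# The kernel below implements F.normalize(inputs) over dim=1 of the
# (4, 4, 4, 4) input: the four loads spaced 16 elements apart gather one
# channel vector per (batch, spatial) location, and the 1e-12 floor mirrors
# F.normalize's default eps.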
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
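# Same normalization pattern for the (out_features, in_features) weight: each
# row of self.weight is L2-normalized before the cosine matmul.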
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_clamp_gt_mul_pow_rsub_sqrt_sub_where_2(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = -0.8775825618903726
tmp3 = tmp1 > tmp2
tmp4 = 0.8775825618903728
tmp5 = tmp1 * tmp4
tmp6 = tmp1 * tmp1
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = triton_helpers.minimum(tmp10, tmp7)
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 0.479425538604203
tmp14 = tmp12 * tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.23971276930210156
tmp17 = tmp1 - tmp16
tmp18 = tl.where(tmp3, tmp15, tmp17)
tmp19 = tmp0 * tmp18
tmp20 = tmp7 - tmp0
tmp21 = tmp20 * tmp1
tmp22 = tmp19 + tmp21
tmp23 = 30.0
tmp24 = tmp22 * tmp23
tl.store(out_ptr0 + x0, tmp24, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_gt_mul_pow_rsub_sqrt_sub_where_2[grid(256)](
primals_3, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf3, primals_2, primals_3, reinterpret_tensor(buf0, (64, 4), (4,
1), 0), buf2
class ArcMarginProductNew(nn.Module):
"""Implement of large margin arc distance: :
Args:
in_features: size of each input sample
out_features: size of each output sample
s: norm of input feature
m: margin
cos(theta + m)
"""
def __init__(self, in_features, out_features, s=30.0, m=0.5,
easy_margin=False):
super(ArcMarginProductNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.weight = Parameter(data=torch.randn(size=(out_features,
in_features), dtype=torch.float32, requires_grad=True),
requires_grad=True)
nn.init.xavier_uniform_(self.weight)
self.easy_margin = easy_margin
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
def forward(self, input_0, input_1):
primals_2 = self.weight
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
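# Note: call() also returns saved tensors (the weight, the targets, the
# normalized-input view, and the raw cosine buffer) for autograd; forward
# exposes only the scaled logits.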
| HotaekHan/classification_uncertainty | ArcMarginProduct | false | 17,386 | [
"MIT"
] | 5 | f0f119b93a84f7b041baf4eddf835dd99013e6a3 | https://github.com/HotaekHan/classification_uncertainty/tree/f0f119b93a84f7b041baf4eddf835dd99013e6a3 |
UNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/pp/cppgfqjyf4o5nju3bivwfhzfdosq3jovlzspw5jkb7m6zbgxdctc.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
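# The gt/mul/where triple is LeakyReLU with negative_slope=0.1 decomposed as
# where(x > 0, x, 0.1 * x); the kernel also stores the sign mask (gt), which
# this forward-only graph keeps around for reuse in the backward pass.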
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yf/cyf42wxdziimhugzenalpsudm3t3jydck5ciuwfnympejpndlo7p.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_1 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_1, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
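# All of the avg_pool2d kernels in this file follow the same pattern: a 2x2,
# stride-2 window is materialized as four strided loads whose sum is scaled by
# 0.25, halving each spatial dimension (64x64 -> 32x32 in the kernel above).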
# kernel path: runs/run_shard_2/inductor_cache/wi/cwiprwztxrvob5ev4kyydlbq3jrbwfljwpifkysefw4hw76wuirr.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_6, %primals_7, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.1), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xa/cxaobyii3thui2jyrx6u3hxg4uco4db6ngljbdax65ouokpv5i4o.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_4 => avg_pool2d_1
# Graph fragment:
# %avg_pool2d_1 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_3, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/va/cva2hyewhrnlmu6nomxuieecx6rzfkmrwspnissqjfz4rs2gbsjo.py
# Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_5 => gt_4, mul_4, where_4
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_1, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.1), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ar/car3qqt4v3nfj2zaad4tab76a5w25tag2ixhvbtwemcehvge5axh.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_7 => avg_pool2d_2
# Graph fragment:
# %avg_pool2d_2 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_5, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_5 = async_compile.triton('triton_poi_fused_avg_pool2d_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/uw/cuwf3po3terc7sywuuwlq6pfzj5bhqju3ru523dh3rlemlmo5lck.py
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# x_8 => gt_6, mul_6, where_6
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_2, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.1), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ua/cuafsfudmv3fi2hgkyiorqm7ncpbt5f23dvyrfwuszsfwhbdea5s.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_10 => avg_pool2d_3
# Graph fragment:
# %avg_pool2d_3 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_7, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_7 = async_compile.triton('triton_poi_fused_avg_pool2d_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ik/cikhj6vf7jn4zofluahdruv6rzspohckkhbkdnzx2bcs4wmwnzfi.py
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_8 => convolution_8
# x_11 => gt_8, mul_8, where_8
# Graph fragment:
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_3, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_8 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.1), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_8, %mul_8), kwargs = {})
triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/b3/cb3m7qvsldrgcxtuicu44ok337wwzup6n4jfk6xflrdgcibgjlo4.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_13 => avg_pool2d_4
# Graph fragment:
# %avg_pool2d_4 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_9, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_9 = async_compile.triton('triton_poi_fused_avg_pool2d_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yt/cyti3mtoirawss33utifqjcwp6wqqbp4zyihcqibuj4etxw24e53.py
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# x_14 => gt_10, mul_10, where_10
# Graph fragment:
# %convolution_10 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_4, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_10 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_10, 0), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_10, 0.1), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_10, %convolution_10, %mul_10), kwargs = {})
triton_poi_fused_convolution_leaky_relu_10 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7d/c7dkex3fn4u7odpzztbfdeqi3xh6vlxmnujnckgbllx2bq2itujd.py
# Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_11 => convolution_11
# x_15 => gt_11
# Graph fragment:
# %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_11 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_11, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_11 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/w3/cw3t2fno46ezf3qh4rr72fio4lkev3urrdfocdufl2a7yif7mrcp.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_16 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_12 = async_compile.triton('triton_poi_fused__to_copy_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
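# 0.3333333333333333 is the source-coordinate scale (in_size - 1) / (out_size - 1)
# = (2 - 1) / (4 - 1), consistent with align_corners=True bilinear upsampling of
# the 2x2 bottleneck to 4x4; this kernel emits the floor ("low") gather index.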
# kernel path: runs/run_shard_2/inductor_cache/hm/chm4mcvle6topics3752p4u6rpmw4rchcakqbrx2uilntq2yo2vx.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_16 => add, clamp_max
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 1), kwargs = {})
triton_poi_fused_add_clamp_13 = async_compile.triton('triton_poi_fused_add_clamp_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
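# Companion "high" index kernel: floor(src) + 1 clamped to in_size - 1 (= 1 for
# the 2-pixel source axis), feeding the opposite corner of the bilinear gather.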
# kernel path: runs/run_shard_2/inductor_cache/rj/crj4k65usvvtzcdftn54wkrcrv2qpk6z4qmwikghjeiixjonaahf.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# x_16 => clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_12, sub
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 0.3333333333333333), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_12, 0.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_14 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
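# Fractional lerp weight for the same upsample: src - floor(src), clamped to
# [0.0, 1.0].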
# kernel path: runs/run_shard_2/inductor_cache/ik/cikb3lupetxcwpp34tgegdb25lonhltpexk4fqgzftjfm2dy4ly2.py
# Topologically Sorted Source Nodes: [conv2d_11, x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_11 => convolution_11
# x_15 => mul_11, where_11
# x_16 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, mul_14, mul_15, mul_16, sub_1, sub_2, sub_4
# Graph fragment:
# %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_11, 0.1), kwargs = {})
# %where_11 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_11, %convolution_11, %mul_11), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_14), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_15), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_16), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4) % 4
x0 = xindex % 4
x6 = (xindex // 16)
x2 = (xindex // 16) % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
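    # Descriptive note (added): in_ptr0/in_ptr5 hold the two precomputed source
    # rows and in_ptr1/in_ptr6 the two source columns for each output pixel;
    # negative indices are wrapped by the input extent (tl.full(..., 2, ...))
    # before the four bilinear taps are gathered, biased, and leaky_relu'd below.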
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
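# A minimal PyTorch sketch of what the fused kernel above computes, for
# reference only; the helper name is illustrative and nothing below is invoked
# by the generated program. The graph fuses the conv bias add, the 0.1-slope
# leaky_relu, and a bilinear upsample whose four gathered taps are lerped along
# x and then along y; the (in - 1) / (out - 1) source-coordinate scale used by
# the index kernels corresponds to align_corners=True.
def _ref_bias_leaky_relu_upsample(conv_out, bias, out_size):
    import torch.nn.functional as F
    y = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.1)
    return F.interpolate(y, size=out_size, mode='bilinear', align_corners=True)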
# kernel path: runs/run_shard_2/inductor_cache/xe/cxek4r75ijg7i7iroessx5v5foooiilhrk5vakzdfcf62pieyhcz.py
# Topologically Sorted Source Nodes: [conv2d_12, x_17], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_12 => convolution_12
# x_17 => gt_12
# Graph fragment:
# %convolution_12 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_4, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_12 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_12, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_16 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 512
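    # Descriptive note (added): x1 selects the per-channel conv bias; only the
    # sign of conv + bias is stored here, as an i1 mask that later fused
    # kernels use to choose between x and 0.1 * x.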
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
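# Sketch of the kernel above (illustrative only): it materializes just the
# sign of conv + bias as a boolean mask; the 0.1 * x branch of the leaky_relu
# is recomputed later wherever the activation value is actually consumed.
def _ref_leaky_relu_mask(conv_out, bias):
    return (conv_out + bias.view(1, -1, 1, 1)) > 0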
# kernel path: runs/run_shard_2/inductor_cache/6e/c6errgpf4oaweqlvo54imwyvxrld6cj53cnjo5yhmvcovtny7veb.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_12, %where_9], 1), kwargs = {})
triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 1024
x0 = xindex % 16
x2 = (xindex // 16384)
x3 = xindex
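    # Descriptive note (added): x1 is the output channel; channels [0, 512)
    # take the leaky_relu branch rebuilt from the sign mask in in_ptr0, and
    # channels [512, 1024) copy the skip tensor in in_ptr3, shifted back by 512.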
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (8192*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (16*x1) + (8192*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 1024, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (16*((-512) + x1)) + (8192*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
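# Sketch of the kernel above (illustrative only, assuming 4-D NCHW tensors;
# torch is imported at the top of this file): the first half of the output
# channels is the leaky_relu activation rebuilt from the stored sign mask, the
# second half copies the skip-connection features.
def _ref_cat_with_skip(mask, conv_out, bias, skip):
    y = conv_out + bias.view(1, -1, 1, 1)
    act = torch.where(mask, y, 0.1 * y)
    return torch.cat([act, skip], dim=1)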
# kernel path: runs/run_shard_2/inductor_cache/ck/ccklouprcfwudrv7yg2q2imj5s7wenra6usifiqaws2hotwav6a3.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_19 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_18 = async_compile.triton('triton_poi_fused__to_copy_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
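# Sketch of the index kernel above (illustrative only): 0.42857142857142855 is
# 3/7 = (in - 1) / (out - 1) for the 4 -> 8 stage, so each output coordinate i
# maps to a non-negative source coordinate whose floor is the lower bilinear tap.
def _ref_src_index_lo(i, scale=3 / 7):
    return int(max(i * scale, 0.0))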
# kernel path: runs/run_shard_2/inductor_cache/zn/cznf3tqo3himpyq2kvi4tlwa22wy4n2urvzftpq3zbpfpg2joslh.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_19 => add_5, clamp_max_4
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_5, 3), kwargs = {})
triton_poi_fused_add_clamp_19 = async_compile.triton('triton_poi_fused_add_clamp_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
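# Sketch of the clamp kernel above (illustrative only): the upper bilinear tap
# is the lower index plus one, clamped to in_size - 1 (here 3) so the gather
# never reads past the input edge.
def _ref_src_index_hi(i, scale=3 / 7, in_size=4):
    return min(int(max(i * scale, 0.0)) + 1, in_size - 1)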
# kernel path: runs/run_shard_2/inductor_cache/fg/cfg3obxwkvn4kw3uma5jotnoxogitsze7hm37g5mytvyfnu55rxs.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# x_19 => clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_19, sub_5
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_4, 0.42857142857142855), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_19, 0.0), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_20 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_20(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
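# Sketch of the weight kernel above (illustrative only): the lerp weight is the
# fractional part of the source coordinate, clamped to [0, 1]; it blends the
# lower and upper taps produced by the two index kernels above.
def _ref_src_weight(i, scale=3 / 7):
    s = max(i * scale, 0.0)
    return min(max(s - int(s), 0.0), 1.0)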
# kernel path: runs/run_shard_2/inductor_cache/4r/c4r245ldeqn6vzszrhr54lcwzggestyuilvqjgijw6yubepza2bj.py
# Topologically Sorted Source Nodes: [conv2d_13, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# x_18 => mul_18, where_13
# x_19 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_7, add_8, add_9, mul_21, mul_22, mul_23, sub_6, sub_7, sub_9
# Graph fragment:
# %convolution_13 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_13, 0.1), kwargs = {})
# %where_13 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_13, %convolution_13, %mul_18), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_6), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_21), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %clamp_max_6), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_22), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %add_7), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %clamp_max_7), kwargs = {})
# %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_23), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 8) % 8
x0 = xindex % 8
x6 = (xindex // 64)
x2 = (xindex // 64) % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
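# The kernel above repeats the fused bias + leaky_relu + 4-tap bilinear gather
# of kernel 15 for the next stage (8x8 output from a 4x4 input, hence the
# tl.full(..., 4, ...) wrap extent); the same pattern recurs at each
# upsampling stage below.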
# kernel path: runs/run_shard_2/inductor_cache/tl/ctlfbz4hquxzdnijg26p2tgvggcl7rl3eomuvv2tgynkshw2fcz5.py
# Topologically Sorted Source Nodes: [conv2d_14, x_20], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_14 => convolution_14
# x_20 => gt_14
# Graph fragment:
# %convolution_14 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_9, %primals_30, %primals_31, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_14 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_14, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_22 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vi/cvivfqvs63xq3uikydhryok7lv2recrozl43zocaroea25yr44n6.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_14, %where_7], 1), kwargs = {})
triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 512
x0 = xindex % 64
x2 = (xindex // 32768)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 512, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (64*((-256) + x1)) + (16384*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6n/c6nq7g6z66sqclgcahcpwxpo7qgpbk53acr7lmuf4tlvpyfttgxz.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_22 => convert_element_type_9
# Graph fragment:
# %convert_element_type_9 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
triton_poi_fused__to_copy_24 = async_compile.triton('triton_poi_fused__to_copy_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
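# 0.4666666666666667 is 7/15 = (in - 1) / (out - 1) for the 8 -> 16 stage,
# again the align_corners=True source-coordinate mapping.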
# kernel path: runs/run_shard_2/inductor_cache/je/cjenm3kh6c6ivjojkt643dkosuc3xeqrcpuwsrxnwxyorrqvjqej.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_22 => add_10, clamp_max_8
# Graph fragment:
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_9, 1), kwargs = {})
# %clamp_max_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_10, 7), kwargs = {})
triton_poi_fused_add_clamp_25 = async_compile.triton('triton_poi_fused_add_clamp_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 7, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/u7/cu7uy5w44ijgcs37t3jhw76s5tzp7dc4qaowtk4z4p4pb5ttnu7g.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# x_22 => clamp_max_10, clamp_min_10, clamp_min_8, convert_element_type_8, iota_4, mul_26, sub_10
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_4, torch.float32), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_8, 0.4666666666666667), kwargs = {})
# %clamp_min_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_26, 0.0), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_8, %convert_element_type_11), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_10, 0.0), kwargs = {})
# %clamp_max_10 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_26 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_26(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6r/c6rj7s3hdsnxdeuueuqg6mqpmjxhirrrre544wagvl7xru3dlv4v.py
# Topologically Sorted Source Nodes: [conv2d_15, x_21, x_22], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_15 => convolution_15
# x_21 => mul_25, where_15
# x_22 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_12, add_13, add_14, mul_28, mul_29, mul_30, sub_11, sub_12, sub_14
# Graph fragment:
# %convolution_15 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_15, 0.1), kwargs = {})
# %where_15 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %convolution_15, %mul_25), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_10), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_28), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %clamp_max_10), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_29), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %add_12), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %clamp_max_11), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %mul_30), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x6 = (xindex // 256)
x2 = (xindex // 256) % 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xh/cxhrvuoyci4igqzzcgcr37vlhi6diigdp2hl2mzgsqljilvpbq2c.py
# Topologically Sorted Source Nodes: [conv2d_16, x_23], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_16 => convolution_16
# x_23 => gt_16
# Graph fragment:
# %convolution_16 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_14, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_16 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_16, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_28 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ca/ccao7krq57rdd66r2ez5jbhtv4ua7635t2c26yvqgg43yyw2rlkp.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_16, %where_5], 1), kwargs = {})
triton_poi_fused_cat_29 = async_compile.triton('triton_poi_fused_cat_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 256
x0 = xindex % 256
x2 = (xindex // 65536)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 256, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (256*((-128) + x1)) + (32768*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/bf/cbf4sfjyvrqlnalkdyweiapbphczq3e2pvid47ppsp74msqxktyg.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_25 => convert_element_type_13
# Graph fragment:
# %convert_element_type_13 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {})
triton_poi_fused__to_copy_30 = async_compile.triton('triton_poi_fused__to_copy_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_30', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
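# 0.4838709677419355 is 15/31 = (in - 1) / (out - 1) for the 16 -> 32 stage.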
# kernel path: runs/run_shard_2/inductor_cache/q7/cq7lffziiksn5cssy3hi6axjczq676uhjbzgtjusryl3bhymxxk4.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_25 => add_15, clamp_max_12
# Graph fragment:
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_13, 1), kwargs = {})
# %clamp_max_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_15, 15), kwargs = {})
triton_poi_fused_add_clamp_31 = async_compile.triton('triton_poi_fused_add_clamp_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 15, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wn/cwnqcmrfmh7h62dn3oafx5smyo4lbaq3zwj6ebyflwbmc34yzaok.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# x_25 => clamp_max_14, clamp_min_12, clamp_min_14, convert_element_type_12, iota_6, mul_33, sub_15
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_6, torch.float32), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_12, 0.4838709677419355), kwargs = {})
# %clamp_min_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_33, 0.0), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_12, %convert_element_type_15), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_15, 0.0), kwargs = {})
# %clamp_max_14 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_14, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_32 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_32', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_32(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
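# Reference note: this kernel emits the fractional weight
# lam = clamp(src - floor(src), 0.0, 1.0) that pairs with the indices above,
# so a 1-D sample becomes x[i0] + lam * (x[i1] - x[i0]).  An illustrative
# sketch of the full index/weight triple (assumed helper, never invoked by
# call()):
def _ref_upsample_weights(out_size: int, in_size: int):
    import torch
    dst = torch.arange(out_size, dtype=torch.float32)
    src = (dst * (in_size - 1) / (out_size - 1)).clamp(min=0.0)
    i0 = src.to(torch.int64)                       # left/top tap
    i1 = torch.clamp(i0 + 1, max=in_size - 1)      # right/bottom tap
    lam = (src - i0.to(torch.float32)).clamp(0.0, 1.0)
    return i0, i1, lam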
# kernel path: runs/run_shard_2/inductor_cache/mc/cmcqbkpppsdqh6xkx3ftqaephwn2rbo5yr4ikkej6vw77idmbmjl.py
# Topologically Sorted Source Nodes: [conv2d_17, x_24, x_25], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_17 => convolution_17
# x_24 => mul_32, where_17
# x_25 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_17, add_18, add_19, mul_35, mul_36, mul_37, sub_16, sub_17, sub_19
# Graph fragment:
# %convolution_17 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_36, %primals_37, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_17, 0.1), kwargs = {})
# %where_17 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_17, %convolution_17, %mul_32), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %convert_element_type_13, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %convert_element_type_13, %clamp_max_13]), kwargs = {})
# %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %clamp_max_12, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max_14), kwargs = {})
# %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_35), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_14), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_36), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_18, %add_17), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %clamp_max_15), kwargs = {})
# %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %mul_37), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x6 = (xindex // 1024)
x2 = (xindex // 1024) % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
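# Reference note: the fused kernel above gathers the four bilinear corner taps
# of the activated convolution output (the bias add and 0.1-slope LeakyReLU
# are rematerialized per tap from the sign mask in in_ptr2), lerps along x
# with the weight in in_ptr7, then along y with the weight in in_ptr8.
# Assuming the graph fragment corresponds to an align_corners=True bilinear
# resize, a minimal eager sketch is (illustrative stand-in names, never
# invoked by call()):
def _ref_leaky_relu_upsample(conv_out, bias, out_hw=(32, 32)):
    import torch.nn.functional as F
    x = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.1)
    return F.interpolate(x, size=out_hw, mode="bilinear", align_corners=True)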
# kernel path: runs/run_shard_2/inductor_cache/ef/cefamdmkw2jkmbuaq3a2jonekrinxkrotisaonk4227ffk4omcvw.py
# Topologically Sorted Source Nodes: [conv2d_18, x_26], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_18 => convolution_18
# x_26 => gt_18
# Graph fragment:
# %convolution_18 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_19, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_18 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_18, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_34 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
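# Reference note: this kernel stores only the sign mask gt_18 (conv output
# plus bias > 0); the LeakyReLU value itself is rematerialized from that mask
# where it is consumed, in the concatenation kernel below.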
# kernel path: runs/run_shard_2/inductor_cache/cy/ccyjmkzlhezsdxat7ueu3adkydtqpg5wylzh2jt7n6lyhxpegx25.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_18, %where_3], 1), kwargs = {})
triton_poi_fused_cat_35 = async_compile.triton('triton_poi_fused_cat_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 128
x0 = xindex % 1024
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp17 = tl.load(in_ptr3 + (x0 + (1024*((-64) + x1)) + (65536*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
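# Reference note: channel concatenation of the rematerialized LeakyReLU of
# conv2d_18 (first 64 channels, rebuilt from the sign mask in in_ptr0) with
# the 64-channel encoder skip where_3.  A minimal eager-mode sketch under
# those assumptions (illustrative stand-in names, never invoked by call()):
def _ref_skip_cat(conv_out, bias, skip):
    import torch
    import torch.nn.functional as F
    dec = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.1)
    return torch.cat([dec, skip], dim=1)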
# kernel path: runs/run_shard_2/inductor_cache/mm/cmmcmntldhsgr3z344jbw6wne3xvybqexjywb25izyqmke43nzkc.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_28 => convert_element_type_17
# Graph fragment:
# %convert_element_type_17 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_8, torch.int64), kwargs = {})
triton_poi_fused__to_copy_36 = async_compile.triton('triton_poi_fused__to_copy_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_36', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
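# Reference note: the floor-index pattern again, now for the 32 -> 64 stage;
# 0.49206349206349204 equals (32 - 1) / (64 - 1), the align_corners=True
# mapping for that resize (compare _ref_upsample_floor_index above).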
# kernel path: runs/run_shard_2/inductor_cache/vr/cvrmd6xdzee5uvmwgicckvv4pdfbydcwureaihml45n57uxxemmz.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_28 => add_20, clamp_max_16
# Graph fragment:
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_17, 1), kwargs = {})
# %clamp_max_16 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_20, 31), kwargs = {})
triton_poi_fused_add_clamp_37 = async_compile.triton('triton_poi_fused_add_clamp_37', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_37', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 31, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
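# Reference note: the 32 -> 64 analogue of triton_poi_fused_add_clamp_31,
# producing the second tap index min(floor(src) + 1, 31).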
# kernel path: runs/run_shard_2/inductor_cache/m6/cm645up6kvhpytpm6b47j7a4vvqgdgd7cvqqt6kgyf6fi54kqfxh.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# x_28 => clamp_max_18, clamp_min_16, clamp_min_18, convert_element_type_16, iota_8, mul_40, sub_20
# Graph fragment:
# %iota_8 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_16 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_8, torch.float32), kwargs = {})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_16, 0.49206349206349204), kwargs = {})
# %clamp_min_16 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_40, 0.0), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_16, %convert_element_type_19), kwargs = {})
# %clamp_min_18 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_20, 0.0), kwargs = {})
# %clamp_max_18 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_18, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_38 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_38', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_38', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_38(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
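# Reference note: the 32 -> 64 analogue of the weight kernel above, emitting
# lam = clamp(src - floor(src), 0.0, 1.0) for the lerp (compare
# _ref_upsample_weights above).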
# kernel path: runs/run_shard_2/inductor_cache/l4/cl4kihlxewyneqcsmoclikcvqyu4s7q2xjbf2u6lxqf63tvo7z4t.py
# Topologically Sorted Source Nodes: [conv2d_19, x_27, x_28], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_19 => convolution_19
# x_27 => mul_39, where_19
# x_28 => _unsafe_index_16, _unsafe_index_17, _unsafe_index_18, _unsafe_index_19, add_22, add_23, add_24, mul_42, mul_43, mul_44, sub_21, sub_22, sub_24
# Graph fragment:
# %convolution_19 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_19, 0.1), kwargs = {})
# %where_19 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_19, %convolution_19, %mul_39), kwargs = {})
# %_unsafe_index_16 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %convert_element_type_17, %convert_element_type_19]), kwargs = {})
# %_unsafe_index_17 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %convert_element_type_17, %clamp_max_17]), kwargs = {})
# %_unsafe_index_18 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %clamp_max_16, %convert_element_type_19]), kwargs = {})
# %_unsafe_index_19 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %clamp_max_16, %clamp_max_17]), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_17, %_unsafe_index_16), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %clamp_max_18), kwargs = {})
# %add_22 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_16, %mul_42), kwargs = {})
# %sub_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_19, %_unsafe_index_18), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_22, %clamp_max_18), kwargs = {})
# %add_23 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_18, %mul_43), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_23, %add_22), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %clamp_max_19), kwargs = {})
# %add_24 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_22, %mul_44), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x6 = (xindex // 4096)
x2 = (xindex // 4096) % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
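# Reference note: the same fused bias-add + LeakyReLU(0.1) + bilinear-gather
# structure as triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33,
# instantiated for the 64-channel 32 -> 64 stage (index wrap of 32 instead of
# 16, row stride 32 and plane stride 1024); compare _ref_leaky_relu_upsample
# above.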
# kernel path: runs/run_shard_2/inductor_cache/37/c37c4lnlmya6pvyd3vpij2eou3upundn3osm7w4ebrbtepglis3v.py
# Topologically Sorted Source Nodes: [conv2d_20, x_29], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_20 => convolution_20
# x_29 => gt_20
# Graph fragment:
# %convolution_20 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_24, %primals_42, %primals_43, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_20 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_20, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_40 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_40', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_40', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
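# Reference note: mask-only LeakyReLU kernel for conv2d_20, analogous to
# triton_poi_fused_convolution_leaky_relu_34; the activation value is
# rematerialized inside the following concatenation.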
# kernel path: runs/run_shard_2/inductor_cache/2p/c2pw4eugdhlj6xrkmfzylo57nwa7wnqgs4grxdbuartndnuv4o5n.py
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_4 => cat_4
# Graph fragment:
# %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_20, %where_1], 1), kwargs = {})
triton_poi_fused_cat_41 = async_compile.triton('triton_poi_fused_cat_41', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 64
x0 = xindex % 4096
x2 = (xindex // 262144)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp17 = tl.load(in_ptr3 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
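# Reference note: the 64 x 64 skip concatenation, pairing the rematerialized
# LeakyReLU of conv2d_20 with the 32-channel encoder tensor where_1; same
# structure as triton_poi_fused_cat_35 (compare _ref_skip_cat above).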
# kernel path: runs/run_shard_2/inductor_cache/ke/ckebbcnbt7ptcvycxu7br4twcc4qebsu3q766rulpktfmhq2b4jq.py
# Topologically Sorted Source Nodes: [conv2d_22, x_31], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_22 => convolution_22
# x_31 => gt_22, mul_47, where_22
# Graph fragment:
# %convolution_22 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_21, %primals_46, %primals_47, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_22 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_22, 0), kwargs = {})
# %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_22, 0.1), kwargs = {})
# %where_22 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_22, %convolution_22, %mul_47), kwargs = {})
triton_poi_fused_convolution_leaky_relu_42 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_42', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_42', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
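# Reference note: unlike the mask-only activation kernels above, this one
# stores both the sign mask and the activated 4-channel value, since where_22
# is the stage's final output (the mask is presumably retained for backward).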
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128, ), (1, ))
assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_29, (512, ), (1, ))
assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_31, (256, ), (1, ))
assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (256, ), (1, ))
assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_35, (128, ), (1, ))
assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (64, ), (1, ))
assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (64, ), (1, ))
assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_43, (32, ), (1, ))
assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (32, ), (1, ))
assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (4, ), (1, ))
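    # Reference note: taken together, these parameter shapes are consistent
    # with a U-Net-style layout: an encoder of paired convolutions (7x7, 5x5,
    # then 3x3) widening 4 -> 32 -> 64 -> 128 -> 256 -> 512 with avg-pool
    # downsampling, and a decoder that upsamples bilinearly and concatenates
    # encoder skips before each conv pair, ending in a 4-channel 3x3 head.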
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, s1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 524288, grid=grid(524288), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(buf5, buf6, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf7, primals_7, buf8, buf9, 262144, grid=grid(262144), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf12 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf10, primals_9, buf11, buf12, 262144, grid=grid(262144), stream=stream0)
del primals_9
buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_3.run(buf12, buf13, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1))
buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf14, primals_11, buf15, buf16, 131072, grid=grid(131072), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1))
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf19 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf17, primals_13, buf18, buf19, 131072, grid=grid(131072), stream=stream0)
del primals_13
buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_5.run(buf19, buf20, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1))
buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf21, primals_15, buf22, buf23, 65536, grid=grid(65536), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf26 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, x_9], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf24, primals_17, buf25, buf26, 65536, grid=grid(65536), stream=stream0)
del primals_17
buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_7.run(buf26, buf27, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1))
buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_8.run(buf28, primals_19, buf29, buf30, 32768, grid=grid(32768), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1))
buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
buf33 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, x_12], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_8.run(buf31, primals_21, buf32, buf33, 32768, grid=grid(32768), stream=stream0)
del primals_21
buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_9.run(buf33, buf34, 8192, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1))
buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_10.run(buf35, primals_23, buf36, buf37, 8192, grid=grid(8192), stream=stream0)
del buf35
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1))
buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_11.run(buf38, primals_25, buf39, 8192, grid=grid(8192), stream=stream0)
buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_12.run(buf40, 4, grid=grid(4), stream=stream0)
buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf41, 4, grid=grid(4), stream=stream0)
buf42 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_12.run(buf42, 4, grid=grid(4), stream=stream0)
buf43 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf43, 4, grid=grid(4), stream=stream0)
buf46 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_14.run(buf46, 4, grid=grid(4), stream=stream0)
buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_14.run(buf48, 4, grid=grid(4), stream=stream0)
buf45 = buf31; del buf31 # reuse
buf49 = buf45; del buf45 # reuse
buf50 = buf49; del buf49 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15.run(buf50, buf41, buf42, buf39, buf38, primals_25, buf40, buf43, buf46, buf48, 32768, grid=grid(32768), stream=stream0)
del buf38
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1))
buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_12, x_17], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_16.run(buf51, primals_27, buf52, 32768, grid=grid(32768), stream=stream0)
buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_17.run(buf52, buf51, primals_27, buf33, buf53, 65536, grid=grid(65536), stream=stream0)
del buf51
del primals_27
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1))
buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_13, x_18], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_16.run(buf54, primals_29, buf55, 32768, grid=grid(32768), stream=stream0)
buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_18.run(buf56, 8, grid=grid(8), stream=stream0)
buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_19.run(buf57, 8, grid=grid(8), stream=stream0)
buf58 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_18.run(buf58, 8, grid=grid(8), stream=stream0)
buf59 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_19.run(buf59, 8, grid=grid(8), stream=stream0)
buf62 = empty_strided_cuda((8, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_20.run(buf62, 8, grid=grid(8), stream=stream0)
buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_20.run(buf64, 8, grid=grid(8), stream=stream0)
buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0); del buf17 # reuse
buf65 = buf61; del buf61 # reuse
buf66 = buf65; del buf65 # reuse
# Topologically Sorted Source Nodes: [conv2d_13, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21.run(buf66, buf57, buf58, buf55, buf54, primals_29, buf56, buf59, buf62, buf64, 131072, grid=grid(131072), stream=stream0)
del buf54
del primals_29
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1))
buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_14, x_20], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_22.run(buf67, primals_31, buf68, 65536, grid=grid(65536), stream=stream0)
buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_23.run(buf68, buf67, primals_31, buf26, buf69, 131072, grid=grid(131072), stream=stream0)
del buf67
del primals_31
# Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution]
buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1))
buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_15, x_21], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_22.run(buf70, primals_33, buf71, 65536, grid=grid(65536), stream=stream0)
buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_24.run(buf72, 16, grid=grid(16), stream=stream0)
buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_25.run(buf73, 16, grid=grid(16), stream=stream0)
buf74 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_24.run(buf74, 16, grid=grid(16), stream=stream0)
buf75 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_25.run(buf75, 16, grid=grid(16), stream=stream0)
buf78 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_26.run(buf78, 16, grid=grid(16), stream=stream0)
buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_26.run(buf80, 16, grid=grid(16), stream=stream0)
buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16, 1), 0); del buf10 # reuse
buf81 = buf77; del buf77 # reuse
buf82 = buf81; del buf81 # reuse
# Topologically Sorted Source Nodes: [conv2d_15, x_21, x_22], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27.run(buf82, buf73, buf74, buf71, buf70, primals_33, buf72, buf75, buf78, buf80, 262144, grid=grid(262144), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1))
buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_16, x_23], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_28.run(buf83, primals_35, buf84, 131072, grid=grid(131072), stream=stream0)
buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_29.run(buf84, buf83, primals_35, buf19, buf85, 262144, grid=grid(262144), stream=stream0)
del buf83
del primals_35
# Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1))
buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_17, x_24], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_28.run(buf86, primals_37, buf87, 131072, grid=grid(131072), stream=stream0)
buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_30.run(buf88, 32, grid=grid(32), stream=stream0)
buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_31.run(buf89, 32, grid=grid(32), stream=stream0)
buf90 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_30.run(buf90, 32, grid=grid(32), stream=stream0)
buf91 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_31.run(buf91, 32, grid=grid(32), stream=stream0)
buf94 = empty_strided_cuda((32, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_32.run(buf94, 32, grid=grid(32), stream=stream0)
buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_32.run(buf96, 32, grid=grid(32), stream=stream0)
buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0); del buf3 # reuse
buf97 = buf93; del buf93 # reuse
buf98 = buf97; del buf97 # reuse
# Topologically Sorted Source Nodes: [conv2d_17, x_24, x_25], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33.run(buf98, buf89, buf90, buf87, buf86, primals_37, buf88, buf91, buf94, buf96, 524288, grid=grid(524288), stream=stream0)
del buf86
del primals_37
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_18, x_26], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_34.run(buf99, primals_39, buf100, 262144, grid=grid(262144), stream=stream0)
buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_35.run(buf100, buf99, primals_39, buf12, buf101, 524288, grid=grid(524288), stream=stream0)
del buf99
del primals_39
# Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution]
buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_19, x_27], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_34.run(buf102, primals_41, buf103, 262144, grid=grid(262144), stream=stream0)
buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_36.run(buf104, 64, grid=grid(64), stream=stream0)
buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_37.run(buf105, 64, grid=grid(64), stream=stream0)
buf106 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_36.run(buf106, 64, grid=grid(64), stream=stream0)
buf107 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_37.run(buf107, 64, grid=grid(64), stream=stream0)
buf110 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_38.run(buf110, 64, grid=grid(64), stream=stream0)
buf112 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_38.run(buf112, 64, grid=grid(64), stream=stream0)
buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
buf113 = buf109; del buf109 # reuse
buf114 = buf113; del buf113 # reuse
# Topologically Sorted Source Nodes: [conv2d_19, x_27, x_28], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39.run(buf114, buf105, buf106, buf103, buf102, primals_41, buf104, buf107, buf110, buf112, 1048576, grid=grid(1048576), stream=stream0)
del buf102
del primals_41
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_20, x_29], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_40.run(buf115, primals_43, buf116, 524288, grid=grid(524288), stream=stream0)
buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
triton_poi_fused_cat_41.run(buf116, buf115, primals_43, buf5, buf117, 1048576, grid=grid(1048576), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution]
buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf120 = buf115; del buf115 # reuse
# Topologically Sorted Source Nodes: [conv2d_21, x_30], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf118, primals_45, buf119, buf120, 524288, grid=grid(524288), stream=stream0)
del buf118
del primals_45
# Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution]
buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.bool)
buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64, 1), 0); del buf70 # reuse
# Topologically Sorted Source Nodes: [conv2d_22, x_31], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_42.run(buf121, primals_47, buf122, buf123, 65536, grid=grid(65536), stream=stream0)
del buf121
del primals_47
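        # buf123 is the network output: leaky_relu(conv3(...)) at the input
        # resolution, shape (4, 4, 64, 64). The long tuple after it returns
        # the weights and saved intermediates that the matching backward
        # graph apparently expects.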
return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4, buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18, buf19, buf20, buf22, buf23, buf25, buf26, buf27, buf29, buf30, buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42, buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57, buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72, buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87, buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101, buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114, buf116, buf117, buf119, buf120, buf122, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 7, 7), (196, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 7, 7), (1568, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((4, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
from torch.nn import functional as F
import torch.nn.parallel
class down(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
            filter size for the convolution filter. Input N would create
            an N x N filter.
"""
super(down, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
x = F.avg_pool2d(x, 2)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
return x
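# A minimal usage sketch for `down` (the shapes are illustrative assumptions
# matching the UNet's first down stage below, not taken from any caller): a
# (4, 32, 64, 64) input is pooled to 32x32 and projected to 64 channels.
if __name__ == "__main__":
    _down_blk = down(32, 64, 5)
    _down_out = _down_blk(torch.rand(4, 32, 64, 64))
    assert _down_out.shape == (4, 64, 32, 32)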
class up(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x, skpCn)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used for setting input and output channels for
the second convolutional layer.
"""
super(up, self).__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
padding=1)
def forward(self, x, skpCn):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
skpCn : tensor
skip connection input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
        x = F.interpolate(x, scale_factor=2, mode='bilinear',
            align_corners=True)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
negative_slope=0.1)
return x
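# A minimal usage sketch for `up` (assumed shapes matching the UNet's second
# up stage): x is upsampled 2x before conv1, so the skip tensor must already
# be at the doubled resolution with `outChannels` channels.
if __name__ == "__main__":
    _up_blk = up(512, 256)
    _up_out = _up_blk(torch.rand(4, 512, 4, 4), torch.rand(4, 256, 8, 8))
    assert _up_out.shape == (4, 256, 8, 8)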
class UNet(nn.Module):
"""
A class for creating UNet like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(UNet, self).__init__()
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network.
Parameters
----------
x : tensor
input to the UNet.
Returns
-------
tensor
output of the UNet.
"""
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope=0.1)
return x
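# End-to-end sketch (assumed input size): with five pool/upsample stages the
# spatial size should be a multiple of 32; a 64x64 input round-trips to a
# 64x64 output with `outChannels` channels, matching get_inputs() below.
if __name__ == "__main__":
    _net = UNet(inChannels=4, outChannels=4)
    assert _net(torch.rand(4, 4, 64, 64)).shape == (4, 4, 64, 64)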
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'inChannels': 4, 'outChannels': 4}]
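# Harness convention assumed by this dump: get_init_inputs() supplies the
# constructor args/kwargs and get_inputs() the forward-call tensors, e.g.
# (hypothetical driver, not part of the generated code):
#     args, kwargs = get_init_inputs()
#     model = UNet(*args, **kwargs)
#     out = model(*get_inputs())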
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn import functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
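# 2x2, stride-2 average pooling (F.avg_pool2d(x, 2) in `down.forward`) on a
# 64x64 map: each output element loads its four window pixels (columns 2*x0
# and 2*x0 + 1 of rows 2*x1 and 2*x1 + 1, hence offsets 0/1/64/65 with a
# 128-element paired-row stride) and scales the sum by 0.25. The narrower
# variants below do the same for the 32/16/8/4-wide maps.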
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
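# The small kernels below precompute the 1-D lookup tables for
# F.interpolate(..., mode='bilinear', align_corners=True): the source
# coordinate is dst * (in - 1) / (out - 1) (1/3 here, for the 2 -> 4
# upsample); its floor is the low index, floor + 1 clamped to in - 1 is the
# high index, and the clamped fractional part is the lerp weight. The same
# three-kernel family is stamped out again for the 8/16/32/64 output sizes.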
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_14(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
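# Fused gather for the upsample: using the tables above, each output pixel
# rebuilds bias + LeakyReLU at its four source corners (in_ptr2 holds the
# sign mask, in_ptr3 the raw conv output, in_ptr4 the bias), lerps the two
# pixels of each source row with the x-weight (in_ptr7), then lerps the two
# row results with the y-weight (in_ptr8).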
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4 % 4
x0 = xindex % 4
x6 = xindex // 16
x2 = xindex // 16 % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
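# Skip-connection concat (torch.cat((x, skpCn), 1)): output channels
# [0, 512) rebuild bias + LeakyReLU of the fresh conv output from its sign
# mask (in_ptr0), while channels [512, 1024) copy the stored encoder tensor
# (in_ptr3). The _16 kernel above stores only that sign mask; the activation
# itself is reconstructed here.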
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 1024
x0 = xindex % 16
x2 = xindex // 16384
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0).to(tl
.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 1024, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 16 * (-512 + x1) + 8192 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_20(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 8 % 8
x0 = xindex % 8
x6 = xindex // 64
x2 = xindex // 64 % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 512
x0 = xindex % 64
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0).to(
tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 7, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_26(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x6 = xindex // 256
x2 = xindex // 256 % 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 256
x0 = xindex % 256
x2 = xindex // 65536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0).to(
tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 15, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_32(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x6 = xindex // 1024
x2 = xindex // 1024 % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 128
x0 = xindex % 1024
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0
).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 31, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_38(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x6 = xindex // 4096
x2 = xindex // 4096 % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 64
x0 = xindex % 4096
x2 = xindex // 262144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0
).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
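# Note (added): the fused kernel above writes both the leaky-ReLU output
# (out_ptr1) and the boolean x > 0 mask (out_ptr0); the mask is among the
# tensors `call` returns, presumably saved for the backward pass.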
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_29, (512,), (1,))
assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (256,), (1,))
assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_35, (128,), (1,))
assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (64,), (1,))
assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (64,), (1,))
assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_43, (32,), (1,))
assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (32,), (1,))
assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0,
primals_2, buf1, buf2, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf3,
primals_5, buf4, buf5, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
triton_poi_fused_avg_pool2d_1[grid(131072)](buf5, buf6, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf7,
primals_7, buf8, buf9, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf12 = buf7
del buf7
triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf10,
primals_9, buf11, buf12, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_9
buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
triton_poi_fused_avg_pool2d_3[grid(65536)](buf12, buf13, 65536,
XBLOCK=256, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1))
buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf14,
primals_11, buf15, buf16, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_11
buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1))
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf19 = buf14
del buf14
triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf17,
primals_13, buf18, buf19, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_13
buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
triton_poi_fused_avg_pool2d_5[grid(32768)](buf19, buf20, 32768,
XBLOCK=256, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1))
buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf21,
primals_15, buf22, buf23, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_15
buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf26 = buf21
del buf21
triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf24,
primals_17, buf25, buf26, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_17
buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
triton_poi_fused_avg_pool2d_7[grid(16384)](buf26, buf27, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1))
buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf28,
primals_19, buf29, buf30, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_19
buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1))
buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
buf33 = buf28
del buf28
triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf31,
primals_21, buf32, buf33, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_21
buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.
float32)
triton_poi_fused_avg_pool2d_9[grid(8192)](buf33, buf34, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1))
buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_10[grid(8192)](buf35,
primals_23, buf36, buf37, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del buf35
del primals_23
buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1))
buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_11[grid(8192)](buf38,
primals_25, buf39, 8192, XBLOCK=256, num_warps=4, num_stages=1)
buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_12[grid(4)](buf40, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_13[grid(4)](buf41, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused__to_copy_12[grid(4)](buf42, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf43 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_add_clamp_13[grid(4)](buf43, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf46 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_14[grid(4)](buf46, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_14[grid(4)](buf48, 4,
XBLOCK=4, num_warps=1, num_stages=1)
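# Note (added): buf40-buf48 hold the integer source indices and the
# fractional blend weights for the bilinear 2x upsample; the fused
# _unsafe_index_add_..._mul_sub kernel below gathers the four neighbor
# pixels and linearly interpolates them along both axes.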
buf45 = buf31
del buf31
buf49 = buf45
del buf45
buf50 = buf49
del buf49
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15[
grid(32768)](buf50, buf41, buf42, buf39, buf38, primals_25,
buf40, buf43, buf46, buf48, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf38
del primals_25
buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1))
buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf51,
primals_27, buf52, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0
)
del buf24
triton_poi_fused_cat_17[grid(65536)](buf52, buf51, primals_27,
buf33, buf53, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del buf51
del primals_27
buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1))
buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf54,
primals_29, buf55, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_18[grid(8)](buf56, 8, XBLOCK=8, num_warps
=1, num_stages=1)
buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_19[grid(8)](buf57, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf58 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_18[grid(8)](buf58, 8, XBLOCK=8, num_warps
=1, num_stages=1)
buf59 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused_add_clamp_19[grid(8)](buf59, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf62 = empty_strided_cuda((8,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_20[grid(8)](buf62, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_20[grid(8)](buf64, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0)
del buf17
buf65 = buf61
del buf61
buf66 = buf65
del buf65
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21[
grid(131072)](buf66, buf57, buf58, buf55, buf54, primals_29,
buf56, buf59, buf62, buf64, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del buf54
del primals_29
buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1))
buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf67,
primals_31, buf68, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_23[grid(131072)](buf68, buf67, primals_31,
buf26, buf69, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del buf67
del primals_31
buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1))
buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf70,
primals_33, buf71, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_24[grid(16)](buf72, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_25[grid(16)](buf73, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf74 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_24[grid(16)](buf74, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf75 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused_add_clamp_25[grid(16)](buf75, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf78 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_26[grid(16)](buf78,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_26[grid(16)](buf80,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16,
1), 0)
del buf10
buf81 = buf77
del buf77
buf82 = buf81
del buf81
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27[
grid(262144)](buf82, buf73, buf74, buf71, buf70, primals_33,
buf72, buf75, buf78, buf80, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_33
buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1))
buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf83,
primals_35, buf84, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.float32)
triton_poi_fused_cat_29[grid(262144)](buf84, buf83, primals_35,
buf19, buf85, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del buf83
del primals_35
buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1))
buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf86,
primals_37, buf87, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_30[grid(32)](buf88, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_31[grid(32)](buf89, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf90 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_30[grid(32)](buf90, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf91 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused_add_clamp_31[grid(32)](buf91, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf94 = empty_strided_cuda((32,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_32[grid(32)](buf94,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_32[grid(32)](buf96,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024,
32, 1), 0)
del buf3
buf97 = buf93
del buf93
buf98 = buf97
del buf97
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33[
grid(524288)](buf98, buf89, buf90, buf87, buf86, primals_37,
buf88, buf91, buf94, buf96, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf86
del primals_37
buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf99,
primals_39, buf100, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
triton_poi_fused_cat_35[grid(524288)](buf100, buf99, primals_39,
buf12, buf101, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
del buf99
del primals_39
buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf102,
primals_41, buf103, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_36[grid(64)](buf104, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_37[grid(64)](buf105, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf106 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_36[grid(64)](buf106, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf107 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_37[grid(64)](buf107, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf110 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_38[grid(64)](buf110,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf112 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_38[grid(64)](buf112,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
buf113 = buf109
del buf109
buf114 = buf113
del buf113
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39[
grid(1048576)](buf114, buf105, buf106, buf103, buf102,
primals_41, buf104, buf107, buf110, buf112, 1048576, XBLOCK=
1024, num_warps=4, num_stages=1)
del buf102
del primals_41
buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_40[grid(524288)](buf115,
primals_43, buf116, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_41[grid(1048576)](buf116, buf115, primals_43,
buf5, buf117, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_43
buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf120 = buf115
del buf115
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf118,
primals_45, buf119, buf120, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf118
del primals_45
buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64,
1), 0)
del buf70
triton_poi_fused_convolution_leaky_relu_42[grid(65536)](buf121,
primals_47, buf122, buf123, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf121
del primals_47
return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4,
buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18,
buf19, buf20, buf22, buf23, buf25, buf26, buf27, buf29, buf30,
buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42,
buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57,
buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72,
buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87,
buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101,
buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114,
buf116, buf117, buf119, buf120, buf122)
class down(nn.Module):
"""
A class for creating neural network blocks containing layers:
Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet class to create a UNet-like NN architecture.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
filter size for the convolution filter. A value of N creates
an N x N filter.
"""
super(down, self).__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize, stride=
1, padding=int((filterSize - 1) / 2))
self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize, stride
=1, padding=int((filterSize - 1) / 2))
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
x = F.avg_pool2d(x, 2)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
return x
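# Example (added; shapes are illustrative, not from the original source):
# with down(32, 64, 5), the avg-pool halves the spatial dims and the two
# convolutions move the channels from 32 to 64:
#
#   block = down(32, 64, 5)
#   y = block(torch.randn(1, 32, 64, 64))  # y.shape == (1, 64, 32, 32)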
class up(nn.Module):
"""
A class for creating neural network blocks containing layers:
Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet class to create a UNet-like NN architecture.
...
Methods
-------
forward(x, skpCn)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used for setting input and output channels for
the second convolutional layer.
"""
super(up, self).__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
padding=1)
def forward(self, x, skpCn):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
skpCn : tensor
skip connection input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners
=True)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
negative_slope=0.1)
return x
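# Example (added; shapes are illustrative, not from the original source):
# up(512, 256) doubles the spatial dims of `x`, reduces it to 256
# channels, then concatenates the 256-channel skip tensor before conv2:
#
#   block = up(512, 256)
#   y = block(torch.randn(1, 512, 4, 4), torch.randn(1, 256, 8, 8))
#   # y.shape == (1, 256, 8, 8)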
class UNetNew(nn.Module):
"""
A class for creating a UNet-like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(UNetNew, self).__init__()
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.down1.conv1.weight
primals_7 = self.down1.conv1.bias
primals_8 = self.down1.conv2.weight
primals_9 = self.down1.conv2.bias
primals_10 = self.down2.conv1.weight
primals_11 = self.down2.conv1.bias
primals_12 = self.down2.conv2.weight
primals_13 = self.down2.conv2.bias
primals_14 = self.down3.conv1.weight
primals_15 = self.down3.conv1.bias
primals_16 = self.down3.conv2.weight
primals_17 = self.down3.conv2.bias
primals_18 = self.down4.conv1.weight
primals_19 = self.down4.conv1.bias
primals_20 = self.down4.conv2.weight
primals_21 = self.down4.conv2.bias
primals_22 = self.down5.conv1.weight
primals_23 = self.down5.conv1.bias
primals_24 = self.down5.conv2.weight
primals_25 = self.down5.conv2.bias
primals_26 = self.up1.conv1.weight
primals_27 = self.up1.conv1.bias
primals_28 = self.up1.conv2.weight
primals_29 = self.up1.conv2.bias
primals_30 = self.up2.conv1.weight
primals_31 = self.up2.conv1.bias
primals_32 = self.up2.conv2.weight
primals_33 = self.up2.conv2.bias
primals_34 = self.up3.conv1.weight
primals_35 = self.up3.conv1.bias
primals_36 = self.up3.conv2.weight
primals_37 = self.up3.conv2.bias
primals_38 = self.up4.conv1.weight
primals_39 = self.up4.conv1.bias
primals_40 = self.up4.conv2.weight
primals_41 = self.up4.conv2.bias
primals_42 = self.up5.conv1.weight
primals_43 = self.up5.conv1.bias
primals_44 = self.up5.conv2.weight
primals_45 = self.up5.conv2.bias
primals_46 = self.conv3.weight
primals_47 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47])
return output[0]
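# Smoke test (added; mirrors the (4, 4, 64, 64) input that the compiled
# graph asserts on primals_3 above -- other shapes would trip the guards):
#
#   model = UNetNew(4, 4).cuda()
#   out = model(torch.randn(4, 4, 64, 64, device='cuda'))
#   assert out.shape == (4, 4, 64, 64)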
| DA4EVENT/home | UNet | false | 17,387 | ["MIT"] | 5 | 18cc93a795ce132e05b886aa34565a102915b1c6 | https://github.com/DA4EVENT/home/tree/18cc93a795ce132e05b886aa34565a102915b1c6 |
CumMax | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/vo/cvoraghc5qf55zcfhhaehkbcu44d2jjp2ycccoqvypsanxgsntku.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rp/crpb4gmryyvzi667r2r6q7jvsccpt3qnvatmlptwouz3pazckdk6.py
# Topologically Sorted Source Nodes: [softmax, cumsum], Original ATen: [aten._softmax, aten.cumsum]
# Source node to ATen node mapping:
# cumsum => cumsum
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%div, -1), kwargs = {})
triton_per_fused__softmax_cumsum_1 = async_compile.triton('triton_per_fused__softmax_cumsum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_cumsum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_cumsum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = tmp8.to(tl.float32)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp11, = tl.associative_scan((tmp10,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (r1 + (4*x0)), tmp11, xmask)
''', device_str='cuda')
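# Note (added): the kernel above divides each 4-element row by its sum
# (finishing the softmax) and then runs an inclusive add-scan along the
# row with tl.associative_scan, producing the cumulative sum in one pass.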
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, cumsum], Original ATen: [aten._softmax, aten.cumsum]
triton_per_fused__softmax_cumsum_1.run(buf0, buf1, 64, 4, grid=grid(64), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CumMax(nn.Module):
def __init__(self):
super(CumMax, self).__init__()
def forward(self, input):
return torch.cumsum(nn.Softmax(dim=-1)(input), -1)
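# Property check (added, verifiable by hand): each output row is the
# running sum of a softmax distribution, so it is nondecreasing and ends
# at 1.0. For example:
#   CumMax()(torch.zeros(1, 4)) -> tensor([[0.25, 0.50, 0.75, 1.00]])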
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused__softmax_cumsum_1(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = tmp8.to(tl.float32)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp11, = tl.associative_scan((tmp10,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp11, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused__softmax_cumsum_1[grid(64)](buf0, buf1, 64, 4,
XBLOCK=32, num_warps=2, num_stages=1)
del buf0
return buf1,
class CumMaxNew(nn.Module):
def __init__(self):
super(CumMaxNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
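# Usage sketch (added; the compiled path asserts a contiguous
# (4, 4, 4, 4) CUDA input, so call it with exactly that shape):
#
#   out = CumMaxNew()(torch.rand(4, 4, 4, 4, device='cuda'))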
| Hritikbansal/RNNs_SVA_OOD | CumMax | false | 17,388 | ["MIT"] | 4 | a1c73955342d9d35c49da5fcb7b315e37b0f75d1 | https://github.com/Hritikbansal/RNNs_SVA_OOD/tree/a1c73955342d9d35c49da5fcb7b315e37b0f75d1 |
PositionWiseFeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/y7/cy75pv62flzgueapzlnlplljec2lmbrjxeuhmgi2gb5pjyl6sna4.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
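# Note (added): this copy kernel materializes the (batch, length, dim) ->
# (batch, dim, length) permute shown in the graph fragment above, so the
# extern Conv1d call sees a contiguous channels-first layout.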
# kernel path: runs/run_shard_2/inductor_cache/ur/curhmk5y5q3p75evjihs7kgjnfdmg2wtto34yur6beusgb5lyduh.py
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv1d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
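# Note (added): bias add and ReLU are fused here and written in place to
# the convolution output (in_out_ptr0), avoiding an intermediate buffer.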
# kernel path: runs/run_shard_2/inductor_cache/r2/cr2h3252vsveia733y3hc44mzgu7rann2uwjgvo4xn54fam36ec5.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# output_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yp/cypluxg5d6vturel4ikq2mu54xs5n2345irukminytpmfufku64b.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, clone_1, rsqrt, var_mean
# Graph fragment:
# %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_3 = async_compile.triton('triton_poi_fused_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp23, xmask)
''', device_str='cuda')
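# Note (added): this kernel precomputes the per-position mean and
# rsqrt(var + 1e-05) over the 4-feature last dim; the affine scale/shift
# and the residual add with primals_1 happen in the fused kernel below.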
# kernel path: runs/run_shard_2/inductor_cache/qt/cqtpy4jtvipgidqqiaao6wknxod2c6g5bbldlqifeprxvi7yot33.py
# Topologically Sorted Source Nodes: [layer_norm, output_4], Original ATen: [aten.native_layer_norm, aten.add]
# Source node to ATen node mapping:
# layer_norm => add, add_1, clone_1, mul, mul_1, rsqrt, sub, var_mean
# output_4 => add_2
# Graph fragment:
# %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_1), kwargs = {})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (y3), ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = tmp0 + tmp9
tl.store(out_ptr0 + (x2 + (4*y3)), tmp10, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf4, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_3.run(buf4, buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [layer_norm, output_4], Original ATen: [aten.native_layer_norm, aten.add]
triton_poi_fused_add_native_layer_norm_4.run(primals_1, buf4, buf5, buf6, primals_6, primals_7, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
del buf5
del buf6
del primals_7
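    # buf7 is the module output (residual + layer_norm of the two-conv
    # branch); the remaining returns — conv and LayerNorm weights, a
    # (bsz, d, len) transposed view of the input, and the two conv
    # activations — are the tensors autograd needs for the backward pass.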
return (buf7, primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.modules.module
class PositionWiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Conv1d(d_in, d_hid, 1)
self.w_2 = nn.Conv1d(d_hid, d_in, 1)
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
"""
        Merge adjacent information. Equivalent to a linear layer when the kernel size is 1.
        Args:
            x (bsz, len, dim)
        Returns:
            output (bsz, len, dim)
"""
residual = x
output = x.transpose(1, 2)
output = self.w_2(F.relu(self.w_1(output)))
output = output.transpose(1, 2)
output = self.dropout(output)
output = residual + self.layer_norm(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_in': 4, 'd_hid': 4}]
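# Hedged reference sketch (not part of the original module): a Conv1d with
# kernel_size=1 applied over (bsz, dim, len) matches an nn.Linear applied
# over (bsz, len, dim) once the weights are shared, which is the equivalence
# the forward docstring asserts.
def _conv1x1_equals_linear_sketch():
    conv = nn.Conv1d(4, 4, 1)
    lin = nn.Linear(4, 4)
    with torch.no_grad():
        lin.weight.copy_(conv.weight.squeeze(-1))  # (out, in, 1) -> (out, in)
        lin.bias.copy_(conv.bias)
    x = torch.rand(2, 3, 4)  # (bsz, len, dim)
    out_conv = conv(x.transpose(1, 2)).transpose(1, 2)
    return torch.allclose(out_conv, lin(x), atol=1e-06)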
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
import torch.nn.modules.module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = tmp0 + tmp9
tl.store(out_ptr0 + (x2 + 4 * y3), tmp10, xmask & ymask)
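# Fused epilogue computed above, per element of the (bsz, len, d) output:
#   out = residual + ((x - mean) * inv_std) * gamma + beta
# with in_ptr0 = residual, in_ptr1 = conv output, in_ptr2 = channel mean,
# in_ptr3 = rsqrt(var + eps), in_ptr4 = layer_norm.weight (gamma), and
# in_ptr5 = layer_norm.bias (beta).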
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_3[grid(16)](buf4, buf5, buf6, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf7 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](primals_1,
buf4, buf5, buf6, primals_6, primals_7, buf7, 16, 4, XBLOCK=2,
YBLOCK=16, num_warps=1, num_stages=1)
del buf5
del buf6
del primals_7
return buf7, primals_2, primals_4, primals_6, reinterpret_tensor(primals_1,
(4, 4, 4), (16, 1, 4), 0), buf2, buf4
class PositionWiseFeedForwardNew(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Conv1d(d_in, d_hid, 1)
self.w_2 = nn.Conv1d(d_hid, d_in, 1)
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
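# Hedged usage sketch (assumes a CUDA device; `PositionWiseFeedForward` is
# the eager reference implementation shown earlier):
#   eager = PositionWiseFeedForward(4, 4).cuda().eval()
#   fused = PositionWiseFeedForwardNew(4, 4).cuda().eval()
#   fused.load_state_dict(eager.state_dict())
#   x = torch.rand(4, 4, 4, device='cuda')
#   torch.testing.assert_close(fused(x), eager(x))  # dropout is a no-op in eval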
| ChCh1999/RTPB | PositionWiseFeedForward | false | 17,389 | ["MIT"] | 8 | 1066a3bfe4fe1b41eff74fd152936880302a60a2 | https://github.com/ChCh1999/RTPB/tree/1066a3bfe4fe1b41eff74fd152936880302a60a2 |
FastRCNNPredictor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/u4/cu4tu54ulpihko3y3wkmsiw7rwsbh3a7bvh4dt2i4vyjmq6t5qcz.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.add]
# Source node to ATen node mapping:
# scores => add
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, 0.0), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [bbox_deltas], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return (buf1, buf2, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
class FastRCNNPredictor(nn.Module):
"""
Standard classification + bounding box regression layers
for Fast R-CNN.
Arguments:
in_channels (int): number of input channels
num_classes (int): number of output classes (including background)
"""
def __init__(self, in_channels, num_classes, n_fc_classif_layers=1,
dropout=0.1, batched_nms=True):
super(FastRCNNPredictor, self).__init__()
self.n_fc_classif_layers = n_fc_classif_layers
self.batched_nms = batched_nms
        # ModuleDict (not a plain dict) so the hidden layers register as
        # submodules and their parameters follow .to()/.cuda()/state_dict().
        self.fc_classif_layers = nn.ModuleDict({str(i): nn.Linear(
            in_channels, in_channels) for i in range(n_fc_classif_layers - 1)})
self.dropout = nn.Dropout(dropout)
self.cls_score = nn.Linear(in_channels, num_classes)
if self.batched_nms:
self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
else:
self.bbox_pred = nn.Linear(in_channels, 2 * 4)
def forward(self, x, get_scores=True, get_deltas=True):
if x.ndimension() == 4:
assert list(x.shape[2:]) == [1, 1]
x = x.flatten(start_dim=1)
if get_scores:
scores = x + 0.0
for lno, layer in self.fc_classif_layers.items():
scores = F.relu(layer(scores))
scores = self.dropout(scores)
scores = self.cls_score(scores)
else:
scores = None
bbox_deltas = self.bbox_pred(x) if get_deltas else None
return scores, bbox_deltas
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'num_classes': 4}]
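# Hedged shape sketch for the default config (in_channels=4, num_classes=4,
# batched_nms=True), matching the compiled buffers below:
#   x:           (N, 4)   pooled RoI features
#   scores:      (N, 4)   one logit per class, background included
#   bbox_deltas: (N, 16)  four regression targets per class (num_classes * 4)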
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
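# triton_poi_fused_add_0 materializes `scores = x + 0.0` from the eager
# forward: an element-wise copy into buf0 so the classification branch owns
# its input before the extern addmm produces the class logits.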
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, buf0, reinterpret_tensor(primals_2,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return buf1, buf2, primals_1, buf0
class FastRCNNPredictorNew(nn.Module):
"""
Standard classification + bounding box regression layers
for Fast R-CNN.
Arguments:
in_channels (int): number of input channels
num_classes (int): number of output classes (including background)
"""
def __init__(self, in_channels, num_classes, n_fc_classif_layers=1,
dropout=0.1, batched_nms=True):
super(FastRCNNPredictorNew, self).__init__()
self.n_fc_classif_layers = n_fc_classif_layers
self.batched_nms = batched_nms
        self.fc_classif_layers = nn.ModuleDict({str(i): nn.Linear(
            in_channels, in_channels) for i in range(n_fc_classif_layers - 1)})
self.dropout = nn.Dropout(dropout)
self.cls_score = nn.Linear(in_channels, num_classes)
if self.batched_nms:
self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
else:
self.bbox_pred = nn.Linear(in_channels, 2 * 4)
def forward(self, input_0):
primals_1 = self.cls_score.weight
primals_3 = self.cls_score.bias
primals_4 = self.bbox_pred.weight
primals_5 = self.bbox_pred.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
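# Note (hedged): the compiled wrapper specializes the eager forward to its
# defaults (get_scores=True, get_deltas=True, n_fc_classif_layers=1), so the
# hidden-layer loop and flag branches are compiled out; dropout is likewise
# absent, consistent with an eval-mode trace.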
| CancerDataScience/NuCLS | FastRCNNPredictor | false | 17,390 | ["MIT"] | 7 | c172b55b18d4ea78c3f51a8fd28ee6c2595c8360 | https://github.com/CancerDataScience/NuCLS/tree/c172b55b18d4ea78c3f51a8fd28ee6c2595c8360 |
ImageDiscriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/jy/cjyvkkorvu5lxphkt3zjteh6nwmxumq4s3qj2rtqsh73m6miyq4s.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/bb/cbbij33y265dfwawonrhp33ztf5z3gcco43rhqevaow6bec55cht.py
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# out_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/df/cdf45pxt4ect3gzzvssbc4a2hubi6nchyu2jnnhifuly3ouo3lel.py
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# out_2 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 512) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zk/czkb5io6vq23imk2uv5wtn3zig36pwuqw2me45yfrkmwgo7zjtku.py
# Topologically Sorted Source Nodes: [conv2d_3, out_3], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# out_3 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/f7/cf7pilcd5azc65umfxrkuyzzdkjsc22nslkxyccmarerk2ssbvxb.py
# Topologically Sorted Source Nodes: [conv2d_4, out_4], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# out_4 => sigmoid
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [2, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_sigmoid_4 = async_compile.triton('triton_poi_fused_convolution_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/tm/ctmh2snjzljd5qt6kio2wplkbcuxot36f5mcgcd4mzhxqb2oj2n5.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# out_5 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%sigmoid, [1, 25], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_5 = async_compile.triton('triton_poi_fused_avg_pool2d_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 25, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (32*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (32*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + (32*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (3 + x0 + (32*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + (32*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (5 + x0 + (32*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (6 + x0 + (32*x1)), xmask)
tmp13 = tl.load(in_ptr0 + (7 + x0 + (32*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (8 + x0 + (32*x1)), xmask)
tmp17 = tl.load(in_ptr0 + (9 + x0 + (32*x1)), xmask)
tmp19 = tl.load(in_ptr0 + (10 + x0 + (32*x1)), xmask)
tmp21 = tl.load(in_ptr0 + (11 + x0 + (32*x1)), xmask)
tmp23 = tl.load(in_ptr0 + (12 + x0 + (32*x1)), xmask)
tmp25 = tl.load(in_ptr0 + (13 + x0 + (32*x1)), xmask)
tmp27 = tl.load(in_ptr0 + (14 + x0 + (32*x1)), xmask)
tmp29 = tl.load(in_ptr0 + (15 + x0 + (32*x1)), xmask)
tmp31 = tl.load(in_ptr0 + (16 + x0 + (32*x1)), xmask)
tmp33 = tl.load(in_ptr0 + (17 + x0 + (32*x1)), xmask)
tmp35 = tl.load(in_ptr0 + (18 + x0 + (32*x1)), xmask)
tmp37 = tl.load(in_ptr0 + (19 + x0 + (32*x1)), xmask)
tmp39 = tl.load(in_ptr0 + (20 + x0 + (32*x1)), xmask)
tmp41 = tl.load(in_ptr0 + (21 + x0 + (32*x1)), xmask)
tmp43 = tl.load(in_ptr0 + (22 + x0 + (32*x1)), xmask)
tmp45 = tl.load(in_ptr0 + (23 + x0 + (32*x1)), xmask)
tmp47 = tl.load(in_ptr0 + (24 + x0 + (32*x1)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
tmp49 = 0.04
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + (x2), tmp50, xmask)
''', device_str='cuda')
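# The kernel above unrolls the (1, 25) averaging window: 25 width-adjacent
# loads are summed and scaled by 0.04 (= 1/25), shrinking the width from 32
# to 8. An equivalent eager form (a sketch) would be
# torch.nn.functional.avg_pool2d(out, kernel_size=(1, 25), stride=1).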
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (64, 6, 3, 3), (54, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 6, 128, 128), (98304, 16384, 128, 1))
assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (1, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 1048576, grid=grid(1048576), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf4 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.bool)
buf5 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 524288, grid=grid(524288), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 16, 32), (131072, 512, 32, 1))
buf7 = empty_strided_cuda((4, 256, 16, 32), (131072, 512, 32, 1), torch.bool)
buf8 = reinterpret_tensor(buf3, (4, 256, 16, 32), (131072, 512, 32, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf6, primals_7, buf7, buf8, 524288, grid=grid(524288), stream=stream0)
del buf6
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(2, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 256, 8, 32), (65536, 256, 32, 1))
buf10 = empty_strided_cuda((4, 256, 8, 32), (65536, 256, 32, 1), torch.bool)
buf11 = empty_strided_cuda((4, 256, 8, 32), (65536, 256, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, out_3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_3.run(buf9, primals_9, buf10, buf11, 262144, grid=grid(262144), stream=stream0)
del buf9
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_10, stride=(2, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 4, 32), (128, 128, 32, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, out_4], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_4.run(buf13, primals_11, 512, grid=grid(512), stream=stream0)
del primals_11
buf14 = empty_strided_cuda((4, 1, 4, 8), (32, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_5.run(buf13, buf14, 128, grid=grid(128), stream=stream0)
return (reinterpret_tensor(buf14, (128, ), (1, ), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf13, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 6, 3, 3), (54, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 6, 128, 128), (98304, 16384, 128, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class ImageDiscriminator(nn.Module):
def __init__(self):
super(ImageDiscriminator, self).__init__()
self.conv1 = nn.Conv2d(in_channels=6, out_channels=64, kernel_size=
3, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=(2, 1), padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=(2, 1), padding=1)
self.conv5 = nn.Conv2d(in_channels=256, out_channels=1, kernel_size
=3, stride=(2, 1), padding=1)
self.sig5 = nn.Sigmoid()
self.avg_pool = nn.AvgPool2d(kernel_size=(1, 25), stride=1)
self._do_initializer()
def _do_initializer(self):
for module in self.modules():
if isinstance(module, nn.Conv2d):
nn.init.normal_(tensor=module.weight, mean=0, std=0.01)
def forward(self, inputs):
out = F.leaky_relu(self.conv1(inputs), negative_slope=0.2)
out = F.leaky_relu(self.conv2(out), negative_slope=0.2)
out = F.leaky_relu(self.conv3(out), negative_slope=0.2)
out = F.leaky_relu(self.conv4(out), negative_slope=0.2)
out = self.sig5(self.conv5(out))
out = self.avg_pool(out)
out = out.view(-1)
return out
def get_inputs():
return [torch.rand([4, 6, 128, 128])]
def get_init_inputs():
return [[], {}]
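# Hedged shape trace for the (4, 6, 128, 128) benchmark input, matching the
# buffer shapes asserted in the compiled wrapper below:
#   conv1, stride 2      -> (4,  64, 64, 64)
#   conv2, stride 2      -> (4, 128, 32, 32)
#   conv3, stride (2, 1) -> (4, 256, 16, 32)
#   conv4, stride (2, 1) -> (4, 256,  8, 32)
#   conv5, stride (2, 1) -> (4,   1,  4, 32)
#   avg_pool (1, 25), stride 1 -> (4, 1, 4, 8); view(-1) -> (128,)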
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # xmask folded to constant True by the compiler; loads/stores below pass no mask
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # xmask folded to constant True by the compiler; loads/stores below pass no mask
x3 = xindex
x1 = xindex // 1024 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # xmask folded to constant True by the compiler; loads/stores below pass no mask
x3 = xindex
x1 = xindex // 512 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # xmask folded to constant True by the compiler; loads/stores below pass no mask
x3 = xindex
x1 = xindex // 256 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 32 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 32 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (3 + x0 + 32 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + 32 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (5 + x0 + 32 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (6 + x0 + 32 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (7 + x0 + 32 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (8 + x0 + 32 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (9 + x0 + 32 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (10 + x0 + 32 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (11 + x0 + 32 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (12 + x0 + 32 * x1), xmask)
tmp25 = tl.load(in_ptr0 + (13 + x0 + 32 * x1), xmask)
tmp27 = tl.load(in_ptr0 + (14 + x0 + 32 * x1), xmask)
tmp29 = tl.load(in_ptr0 + (15 + x0 + 32 * x1), xmask)
tmp31 = tl.load(in_ptr0 + (16 + x0 + 32 * x1), xmask)
tmp33 = tl.load(in_ptr0 + (17 + x0 + 32 * x1), xmask)
tmp35 = tl.load(in_ptr0 + (18 + x0 + 32 * x1), xmask)
tmp37 = tl.load(in_ptr0 + (19 + x0 + 32 * x1), xmask)
tmp39 = tl.load(in_ptr0 + (20 + x0 + 32 * x1), xmask)
tmp41 = tl.load(in_ptr0 + (21 + x0 + 32 * x1), xmask)
tmp43 = tl.load(in_ptr0 + (22 + x0 + 32 * x1), xmask)
tmp45 = tl.load(in_ptr0 + (23 + x0 + 32 * x1), xmask)
tmp47 = tl.load(in_ptr0 + (24 + x0 + 32 * x1), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
tmp49 = 0.04
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + x2, tmp50, xmask)
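# This kernel unrolls the 1x25 average-pool window by hand: 25 contiguous
# loads per output element, summed and scaled by 1/25 = 0.04. A rough eager
# equivalent (for illustration only):
#   torch.nn.functional.avg_pool2d(x, kernel_size=(1, 25), stride=1)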
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (64, 6, 3, 3), (54, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 6, 128, 128), (98304, 16384, 128, 1))
assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (1, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf0,
primals_2, buf1, buf2, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf0
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf4 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(524288)](buf3,
primals_5, buf4, buf5, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 16, 32), (131072, 512, 32, 1))
buf7 = empty_strided_cuda((4, 256, 16, 32), (131072, 512, 32, 1),
torch.bool)
buf8 = reinterpret_tensor(buf3, (4, 256, 16, 32), (131072, 512, 32,
1), 0)
del buf3
triton_poi_fused_convolution_leaky_relu_2[grid(524288)](buf6,
primals_7, buf7, buf8, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(2, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 256, 8, 32), (65536, 256, 32, 1))
buf10 = empty_strided_cuda((4, 256, 8, 32), (65536, 256, 32, 1),
torch.bool)
buf11 = empty_strided_cuda((4, 256, 8, 32), (65536, 256, 32, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_3[grid(262144)](buf9,
primals_9, buf10, buf11, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf9
del primals_9
buf12 = extern_kernels.convolution(buf11, primals_10, stride=(2, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 4, 32), (128, 128, 32, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_sigmoid_4[grid(512)](buf13, primals_11,
512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf14 = empty_strided_cuda((4, 1, 4, 8), (32, 32, 8, 1), torch.float32)
triton_poi_fused_avg_pool2d_5[grid(128)](buf13, buf14, 128, XBLOCK=
128, num_warps=4, num_stages=1)
return (reinterpret_tensor(buf14, (128,), (1,), 0), primals_1,
primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf2,
buf4, buf5, buf7, buf8, buf10, buf11, buf13)
class ImageDiscriminatorNew(nn.Module):
def __init__(self):
super(ImageDiscriminatorNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=6, out_channels=64, kernel_size=
3, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=(2, 1), padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=(2, 1), padding=1)
self.conv5 = nn.Conv2d(in_channels=256, out_channels=1, kernel_size
=3, stride=(2, 1), padding=1)
self.sig5 = nn.Sigmoid()
self.avg_pool = nn.AvgPool2d(kernel_size=(1, 25), stride=1)
self._do_initializer()
def _do_initializer(self):
for module in self.modules():
if isinstance(module, nn.Conv2d):
nn.init.normal_(tensor=module.weight, mean=0, std=0.01)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
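# Hedged usage sketch (illustration, not part of the original repo): the
# compiled wrapper expects a CUDA float tensor of shape (4, 6, 128, 128),
# matching the assert_size_stride guard on primals_3 in call(), and returns a
# flat (128,) vector of sigmoid scores (4 images x 4 x 8 pooled positions).
if __name__ == '__main__':
    disc = ImageDiscriminatorNew().cuda()
    scores = disc(torch.rand(4, 6, 128, 128, device='cuda'))
    print(scores.shape)  # torch.Size([128])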
| HotaekHan/Synthetically_Supervised_Text_Recognition | ImageDiscriminator | false | 17391 | ["MIT"] | 8 | a6bb7d3039b1280c6efe177b69d8b985d2e13285 | https://github.com/HotaekHan/Synthetically_Supervised_Text_Recognition/tree/a6bb7d3039b1280c6efe177b69d8b985d2e13285 |
MAXATTN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/s3/cs3il7r6anfx6zarlhuu6imzmbui4ndk2uips3el2ovzsgd7j4kr.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => max_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%primals_1, 0, True), kwargs = {})
triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/j4/cj4cmuwviyh65knxxhcuzdklxwnqrt7pckw32lmz2w6ltwty7752.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/fp/cfp6aijow2g5is2smjrqj7kca6itit4xrxs2tseqvxiqv3qu5hs6.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vk/cvkegrqrqvgc6l3q574j6alpmoropwpd76xcxahk7k7j2fjfbppc.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7k/c7k2vs5lxkggoyfeeteavtgn7a4q3nedglx7jnarbdpnj26j467h.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_12,), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/w4/cw474y3r7ni2jyljgto7vevgfgcy6cufyamoraokj5i5uqxeg25w.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (1, 4), (0, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf3)
del primals_2
buf4 = reinterpret_tensor(buf1, (4, 1, 1), (1, 4, 4), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf4, primals_3, 4, grid=grid(4), stream=stream0)
del primals_3
buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 16, grid=grid(16), stream=stream0)
buf8 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf8, (1, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(buf9, primals_5, buf10, 16, grid=grid(16), stream=stream0)
del buf9
del primals_5
buf11 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf7, buf11, 16, grid=grid(16), stream=stream0)
return (buf10, reinterpret_tensor(buf11, (1, 16), (16, 1), 0), primals_1, buf7, reinterpret_tensor(buf8, (1, 4), (1, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MAXATTN(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MAXATTN, self).__init__()
self.attention_layer = nn.MultiheadAttention(embed_dim, num_heads)
def forward(self, hidden, key=None, value=None):
T = hidden.size(0)
query = torch.max(hidden, dim=0, keepdim=True)[0]
out, weight = self.attention_layer(query, hidden, hidden)
return torch.cat([out for i in range(T)], dim=0), torch.cat([weight for
i in range(T)], dim=1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
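# Hedged usage sketch (illustration only): a (4, 4) input is treated as an
# unbatched (seq_len=4, embed_dim=4) sequence; the max-pooled query attends
# over the full sequence, the result is tiled back to seq_len rows, and the
# head-averaged attention weights are concatenated along dim 1.
if __name__ == '__main__':
    model = MAXATTN(*get_init_inputs()[0], **get_init_inputs()[1])
    out, weight = model(*get_inputs())
    print(out.shape, weight.shape)  # torch.Size([4, 4]) torch.Size([1, 16])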
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
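# triton_poi_fused__softmax_2 and triton_poi_fused__softmax_3 together form a
# numerically stable row softmax in two passes: exp(x - rowmax), then division
# by the row sum (eager equivalent: torch.softmax(scores, dim=-1)).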
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
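# This kernel averages the four per-head attention rows (hence the division by
# 4.0) and writes the averaged row to every output row, matching the eager
# module's torch.cat of T identical weight copies.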
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(4)](primals_1, buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (1, 4), (0, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf3)
del primals_2
buf4 = reinterpret_tensor(buf1, (4, 1, 1), (1, 4, 4), 0)
del buf1
triton_poi_fused_mul_1[grid(4)](buf4, primals_3, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1,
4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_3[grid(16)](buf6, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
extern_kernels.bmm(buf7, reinterpret_tensor(buf3, (4, 4, 1), (1, 4,
1), 0), out=buf8)
buf9 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (1, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
triton_poi_fused_cat_4[grid(16)](buf9, primals_5, buf10, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del buf9
del primals_5
buf11 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_cat_5[grid(16)](buf7, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf10, reinterpret_tensor(buf11, (1, 16), (16, 1), 0
), primals_1, buf7, reinterpret_tensor(buf8, (1, 4), (1, 1), 0
), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf4, (4, 1, 1), (1, 1, 1), 0
), reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0)
class MAXATTNNew(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MAXATTNNew, self).__init__()
self.attention_layer = nn.MultiheadAttention(embed_dim, num_heads)
def forward(self, input_0):
        primals_2 = self.attention_layer.in_proj_weight
        primals_3 = self.attention_layer.in_proj_bias
        primals_4 = self.attention_layer.out_proj.weight
        primals_5 = self.attention_layer.out_proj.bias
        primals_1 = input_0  # call() reduces primals_1 with the max kernel, so it must be the (4, 4) input, not the out_proj weight
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
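# Hedged usage sketch (assumption, mirroring get_inputs()/get_init_inputs()
# from the eager entry): MAXATTNNew(embed_dim=4, num_heads=4) on a CUDA device
# takes a (4, 4) input and returns the repeated attention output plus the
# flattened, head-averaged attention weights.
if __name__ == '__main__':
    model = MAXATTNNew(4, 4).cuda()
    out, weight = model(torch.rand(4, 4, device='cuda'))
    print(out.shape, weight.shape)  # torch.Size([4, 4]) torch.Size([1, 16])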
| Hritikbansal/RNNs_SVA_OOD | MAXATTN | false | 17392 | ["MIT"] | 4 | a1c73955342d9d35c49da5fcb7b315e37b0f75d1 | https://github.com/Hritikbansal/RNNs_SVA_OOD/tree/a1c73955342d9d35c49da5fcb7b315e37b0f75d1 |
UIAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/zm/czm6acrrgjryz6xi3wza7npycjuiqsdsygpfdo3lbzaquecrmeuj.py
# Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# inputs => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/f2/cf2ng44w4cl4xkxyxthmcmlgufktsmlvp6vpydix77kwotfd5rg4.py
# Topologically Sorted Source Nodes: [output_1, atten], Original ATen: [aten.relu, aten._softmax]
# Source node to ATen node mapping:
# atten => amax, exp, sub
# output_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%addmm,), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%relu, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_relu_1 = async_compile.triton('triton_poi_fused__softmax_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/22/c22qssgzf2vzcbrryv56fhxj53igzto6xkomlrxmc4ptp4z6rsol.py
# Topologically Sorted Source Nodes: [atten, output_2], Original ATen: [aten._softmax, aten.mul]
# Source node to ATen node mapping:
# atten => div, sum_1
# output_2 => mul
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {})
triton_poi_fused__softmax_mul_2 = async_compile.triton('triton_poi_fused__softmax_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tmp1 / tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1, atten], Original ATen: [aten.relu, aten._softmax]
triton_poi_fused__softmax_relu_1.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [atten, output_2], Original ATen: [aten._softmax, aten.mul]
triton_poi_fused__softmax_mul_2.run(primals_1, buf2, buf3, 16, grid=grid(16), stream=stream0)
del buf2
return (buf3, primals_1, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class UIAttention(nn.Module):
def __init__(self, latent_dim, att_size):
super(UIAttention, self).__init__()
self.dense = nn.Linear(in_features=latent_dim * 2, out_features=
att_size)
nn.init.xavier_normal_(self.dense.weight.data)
self.lam = lambda x: F.softmax(x, dim=1)
def forward(self, input, path_output):
inputs = torch.cat((input, path_output), 1)
output = self.dense(inputs)
output = torch.relu(output)
atten = self.lam(output)
output = input * atten
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'latent_dim': 4, 'att_size': 4}]
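# Hedged usage sketch (illustration only): the dense layer scores the
# concatenation of `input` and `path_output` (both (4, latent_dim)), softmax
# over att_size=4 normalizes each row, and `input` is reweighted element-wise.
if __name__ == '__main__':
    model = UIAttention(*get_init_inputs()[0], **get_init_inputs()[1])
    out = model(*get_inputs())
    print(out.shape)  # torch.Size([4, 4])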
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tmp1 / tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
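# This kernel finishes the softmax (dividing the exp values produced by the
# previous kernel by their row sum) and fuses the final element-wise product,
# i.e. input * softmax(relu(dense(cat(input, path_output)))) from the eager
# module.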
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_relu_1[grid(16)](buf1, buf2, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_mul_2[grid(16)](primals_1, buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf2
return buf3, primals_1, buf0, buf1
class UIAttentionNew(nn.Module):
def __init__(self, latent_dim, att_size):
super(UIAttentionNew, self).__init__()
self.dense = nn.Linear(in_features=latent_dim * 2, out_features=
att_size)
nn.init.xavier_normal_(self.dense.weight.data)
self.lam = lambda x: F.softmax(x, dim=1)
def forward(self, input_0, input_1):
primals_3 = self.dense.weight
primals_4 = self.dense.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
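# Hedged usage sketch (assumption): both inputs are (batch=4, latent_dim=4)
# CUDA tensors, matching the size/stride guards in call(); the result is the
# attention-reweighted first input.
if __name__ == '__main__':
    model = UIAttentionNew(latent_dim=4, att_size=4).cuda()
    out = model(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))
    print(out.shape)  # torch.Size([4, 4])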
| Hui-Li/MCRec_PyTorch | UIAttention | false | 17393 | ["MIT"] | 9 | da4da77d2cade40c0a1961481c8e47ac396d12ee | https://github.com/Hui-Li/MCRec_PyTorch/tree/da4da77d2cade40c0a1961481c8e47ac396d12ee |
GAT | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cn/ccn4qol6grwbb3gasxa7ibg3vjb7ieor7qdp6swijguqqbukwccp.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %repeat_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*((((4*x1) + x0) // 16) % 4)) + ((((4*x1) + x0) % 16) % 4)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + ((4*(x1 % 4)) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
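# The cat kernel above builds the pairwise attention input [Wh_i || Wh_j] for
# every ordered (i, j) node pair directly from the projected features h,
# replacing the repeat/view round-trip of the eager source. A minimal
# eager-mode reference of the same layout (a sketch, assuming N = 4 nodes as
# in this trace; not part of the generated graph):
def _attention_pairs_reference(h):
    N = h.size(0)
    left = h.repeat(1, N).view(N * N, -1)    # row i repeated N times
    right = h.repeat(N, 1)                   # all rows, tiled N times
    return torch.cat([left, right], dim=1)   # shape (N * N, 2 * out_features)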
# kernel path: runs/run_shard_2/inductor_cache/4r/c4r7qkyes5rbryciwaphgmxru6ck7iweqdesggebzgrgp5ryzwzx.py
# Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# e => gt
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze, 0), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
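# leaky_relu_1 only records the sign mask (x > 0) as int1; the LeakyReLU value
# itself is reconstructed later inside the fused softmax kernels. The same
# kernel is also reused on the adjacency input to precompute the adj > 0 mask
# that gates the attention logits.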
# kernel path: runs/run_shard_2/inductor_cache/kg/ckgsbli2cqflpwmdemarltfcgvccyfff4hbwg2vkr2e4kg2nfwfz.py
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention => where_1
# attention_1 => amax, exp, sub, sum_1
# attention_10 => amax_3, exp_3, sub_3, sum_4
# attention_3 => where_4
# attention_4 => amax_1, exp_1, sub_1, sum_2
# attention_6 => where_7
# attention_7 => amax_2, exp_2, sub_2, sum_3
# attention_9 => where_10
# e => mul, where
# e_1 => mul_5, where_3
# e_2 => mul_10, where_6
# e_3 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {})
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_4, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_7, [1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_10, [1], True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [1], True), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_2 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 36, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp41 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp46 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp52 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp58 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr5 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp75 = tl.load(in_ptr6 + (4*x0), xmask, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr5 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp80 = tl.load(in_ptr6 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr5 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp86 = tl.load(in_ptr6 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp91 = tl.load(in_ptr5 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp92 = tl.load(in_ptr6 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr7 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp109 = tl.load(in_ptr8 + (4*x0), xmask, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr7 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp114 = tl.load(in_ptr8 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp119 = tl.load(in_ptr7 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp120 = tl.load(in_ptr8 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp125 = tl.load(in_ptr7 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp126 = tl.load(in_ptr8 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp42 = tmp41 * tmp3
tmp43 = tl.where(tmp40, tmp41, tmp42)
tmp44 = tl.where(tmp0, tmp43, tmp6)
tmp47 = tmp46 * tmp3
tmp48 = tl.where(tmp45, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp48, tmp6)
tmp50 = triton_helpers.maximum(tmp44, tmp49)
tmp53 = tmp52 * tmp3
tmp54 = tl.where(tmp51, tmp52, tmp53)
tmp55 = tl.where(tmp15, tmp54, tmp6)
tmp56 = triton_helpers.maximum(tmp50, tmp55)
tmp59 = tmp58 * tmp3
tmp60 = tl.where(tmp57, tmp58, tmp59)
tmp61 = tl.where(tmp22, tmp60, tmp6)
tmp62 = triton_helpers.maximum(tmp56, tmp61)
tmp63 = tmp44 - tmp62
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp49 - tmp62
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp55 - tmp62
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tmp71 = tmp61 - tmp62
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp70 + tmp72
tmp76 = tmp75 * tmp3
tmp77 = tl.where(tmp74, tmp75, tmp76)
tmp78 = tl.where(tmp0, tmp77, tmp6)
tmp81 = tmp80 * tmp3
tmp82 = tl.where(tmp79, tmp80, tmp81)
tmp83 = tl.where(tmp8, tmp82, tmp6)
tmp84 = triton_helpers.maximum(tmp78, tmp83)
tmp87 = tmp86 * tmp3
tmp88 = tl.where(tmp85, tmp86, tmp87)
tmp89 = tl.where(tmp15, tmp88, tmp6)
tmp90 = triton_helpers.maximum(tmp84, tmp89)
tmp93 = tmp92 * tmp3
tmp94 = tl.where(tmp91, tmp92, tmp93)
tmp95 = tl.where(tmp22, tmp94, tmp6)
tmp96 = triton_helpers.maximum(tmp90, tmp95)
tmp97 = tmp78 - tmp96
tmp98 = tl_math.exp(tmp97)
tmp99 = tmp83 - tmp96
tmp100 = tl_math.exp(tmp99)
tmp101 = tmp98 + tmp100
tmp102 = tmp89 - tmp96
tmp103 = tl_math.exp(tmp102)
tmp104 = tmp101 + tmp103
tmp105 = tmp95 - tmp96
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp104 + tmp106
tmp110 = tmp109 * tmp3
tmp111 = tl.where(tmp108, tmp109, tmp110)
tmp112 = tl.where(tmp0, tmp111, tmp6)
tmp115 = tmp114 * tmp3
tmp116 = tl.where(tmp113, tmp114, tmp115)
tmp117 = tl.where(tmp8, tmp116, tmp6)
tmp118 = triton_helpers.maximum(tmp112, tmp117)
tmp121 = tmp120 * tmp3
tmp122 = tl.where(tmp119, tmp120, tmp121)
tmp123 = tl.where(tmp15, tmp122, tmp6)
tmp124 = triton_helpers.maximum(tmp118, tmp123)
tmp127 = tmp126 * tmp3
tmp128 = tl.where(tmp125, tmp126, tmp127)
tmp129 = tl.where(tmp22, tmp128, tmp6)
tmp130 = triton_helpers.maximum(tmp124, tmp129)
tmp131 = tmp112 - tmp130
tmp132 = tl_math.exp(tmp131)
tmp133 = tmp117 - tmp130
tmp134 = tl_math.exp(tmp133)
tmp135 = tmp132 + tmp134
tmp136 = tmp123 - tmp130
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp139 = tmp129 - tmp130
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp39, xmask)
tl.store(out_ptr2 + (x0), tmp62, xmask)
tl.store(out_ptr3 + (x0), tmp73, xmask)
tl.store(out_ptr4 + (x0), tmp96, xmask)
tl.store(out_ptr5 + (x0), tmp107, xmask)
tl.store(out_ptr6 + (x0), tmp130, xmask)
tl.store(out_ptr7 + (x0), tmp141, xmask)
''', device_str='cuda')
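# Kernel 2 fuses, for all four attention heads at once, the row-wise max and
# exp-sum of the masked attention logits: non-edges (adj <= 0) are replaced by
# a large negative constant before the numerically stable softmax reduction.
# An eager-mode reference for a single head (a sketch under the traced shapes;
# alpha = 4 comes from the module's init arguments):
def _masked_softmax_stats_reference(e_raw, adj, alpha=4.0):
    e = torch.where(e_raw > 0, e_raw, e_raw * alpha)  # LeakyReLU(alpha)
    masked = torch.where(adj > 0, e, torch.full_like(e, -8999999815811072.0))
    row_max = masked.amax(dim=1, keepdim=True)
    row_sum = torch.exp(masked - row_max).sum(dim=1, keepdim=True)
    return row_max, row_sum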
# kernel path: runs/run_shard_2/inductor_cache/pv/cpvillqqnrhmtlvupjtc2katvjlr2witoru7t5pjarltgdz5pyra.py
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention => where_1
# attention_1 => div, exp, sub
# attention_10 => div_3, exp_3, sub_3
# attention_3 => where_4
# attention_4 => div_1, exp_1, sub_1
# attention_6 => where_7
# attention_7 => div_2, exp_2, sub_2
# attention_9 => where_10
# e => mul, where
# e_1 => mul_5, where_3
# e_2 => mul_10, where_6
# e_3 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {})
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_2, %sum_3), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_4), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_3 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*i1', 6: '*fp32', 7: '*fp32', 8: '*i1', 9: '*fp32', 10: '*fp32', 11: '*i1', 12: '*fp32', 13: '*fp32', 14: '*i1', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_3', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2), xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x2), xmask)
tmp8 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + (x2), xmask).to(tl.int1)
tmp14 = tl.load(in_out_ptr1 + (x2), xmask)
tmp18 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr7 + (x2), xmask).to(tl.int1)
tmp24 = tl.load(in_out_ptr2 + (x2), xmask)
tmp28 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr9 + (x1), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr10 + (x2), xmask).to(tl.int1)
tmp34 = tl.load(in_out_ptr3 + (x2), xmask)
tmp38 = tl.load(in_ptr11 + (x1), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr12 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = tmp14 * tmp3
tmp16 = tl.where(tmp13, tmp14, tmp15)
tmp17 = tl.where(tmp0, tmp16, tmp6)
tmp19 = tmp17 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp0, tmp26, tmp6)
tmp29 = tmp27 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp30 / tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp34, tmp35)
tmp37 = tl.where(tmp0, tmp36, tmp6)
tmp39 = tmp37 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp42 = tmp40 / tmp41
tl.store(in_out_ptr0 + (x2), tmp12, xmask)
tl.store(in_out_ptr1 + (x2), tmp22, xmask)
tl.store(in_out_ptr2 + (x2), tmp32, xmask)
tl.store(in_out_ptr3 + (x2), tmp42, xmask)
''', device_str='cuda')
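# Kernel 3 finishes the softmax in place for all four heads: each masked logit
# is re-derived, shifted by the row max from kernel 2, exponentiated, and
# divided by the row sum. In eager terms this is F.softmax(masked, dim=1) per
# head, written back into the buffers that held the raw logits.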
# kernel path: runs/run_shard_2/inductor_cache/hh/chh6w2ghklyzywtaoo3lcd3onberm3miuk3c3djkdig4mqokonfe.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat_4
# Graph fragment:
# %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_2, %where_5, %where_8, %where_11], 1), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tmp40 = tl.full([1], 16, tl.int64)
tmp41 = tmp0 < tmp40
tmp42 = tl.load(in_ptr3 + ((4*x1) + ((-12) + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + (x2), tmp52, xmask)
''', device_str='cuda')
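# cat_4 applies ELU to each head's h_prime and concatenates the four heads
# along the feature axis, i.e. torch.cat([att(x, adj) for att in attentions],
# dim=1) over concat=True heads. The ELU identity inlined above is
#   elu(x) = x if x > 0 else expm1(x)   (alpha = 1.0).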
# kernel path: runs/run_shard_2/inductor_cache/ls/clsh2ie5vs3nrezepc6wdf5phem3l34645sxafim53m4q6ymeptn.py
# Topologically Sorted Source Nodes: [zero_vec, e_4, attention_12, attention_13], Original ATen: [aten.mul, aten.leaky_relu, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention_12 => where_13
# attention_13 => amax_4, exp_4, sub_4, sum_5
# e_4 => mul_20, where_12
# zero_vec => full_default
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_4, 4), kwargs = {})
# %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %squeeze_4, %mul_20), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_12, %full_default), kwargs = {})
# %amax_4 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_13, [1], True), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_13, %amax_4), kwargs = {})
# %exp_4 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [1], True), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_5 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp39, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/uk/cuks2diyi23xuwjwtsqsucw24cskpggghxl4amhn3lierlyjjg7h.py
# Topologically Sorted Source Nodes: [zero_vec, e_4, attention_12, attention_13], Original ATen: [aten.mul, aten.leaky_relu, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention_12 => where_13
# attention_13 => div_4, exp_4, sub_4
# e_4 => mul_20, where_12
# zero_vec => full_default
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_4, 4), kwargs = {})
# %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %squeeze_4, %mul_20), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_12, %full_default), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_13, %amax_4), kwargs = {})
# %exp_4 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_4, %sum_5), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_6 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*i1', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2), xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x2), xmask)
tmp8 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tl.store(in_out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
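# Kernels 5 and 6 repeat the masked-softmax pattern of kernels 2 and 3 for the
# single output head (out_att), whose input is the 16-wide concatenation of
# the four hidden heads produced by cat_4.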
# kernel path: runs/run_shard_2/inductor_cache/iv/civo4hlpx3sxs5ymmdkyhclqdjg4r62v6xgxexchxcyxyht6fasp.py
# Topologically Sorted Source Nodes: [embed1], Original ATen: [aten.zeros]
# Source node to ATen node mapping:
# embed1 => full_5
# Graph fragment:
# %full_5 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_zeros_7 = async_compile.triton('triton_poi_fused_zeros_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_zeros_7(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
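# zeros_7 materializes the zero tensor consumed (with beta=0) by the
# aten._sparse_addmm calls below; together they implement the two
# torch.sparse.mm(pair_map, x) products from the eager forward.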
# kernel path: runs/run_shard_2/inductor_cache/ii/ciirqyhqzonjd6mmxnrcmh7nz3ugitutkzxafzeco4wcbdlilc3t.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# x_3 => expm1_4, gt_14, mul_22, mul_24, where_14
# Graph fragment:
# %gt_14 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mm_14, 0), kwargs = {})
# %mul_22 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_14, 1.0), kwargs = {})
# %expm1_4 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_22,), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_4, 1.0), kwargs = {})
# %where_14 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_14, %mul_22, %mul_24), kwargs = {})
triton_poi_fused_elu_8 = async_compile.triton('triton_poi_fused_elu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
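# elu_8 is the standalone ELU applied to the output head's aggregation,
# matching x = F.elu(self.out_att(x, adj)) in the eager source.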
# kernel path: runs/run_shard_2/inductor_cache/5s/c5sz4cpmwv6i53sc2ec3cgumyt6kqkuht7k7ljh2ueykwmmjzoso.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.diagonal_copy]
# Source node to ATen node mapping:
# scores => diagonal_copy
# Graph fragment:
# %diagonal_copy : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%mm_16,), kwargs = {})
triton_poi_fused_diagonal_copy_9 = async_compile.triton('triton_poi_fused_diagonal_copy_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_diagonal_copy_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_diagonal_copy_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (5*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
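# diagonal_copy_9 reads stride 5 * x0, i.e. element (x0, x0) of the 4x4
# product (row-major offset 4 * x0 + x0), so the returned scores are the
# diagonal of embed1 @ W @ embed2.T, one bilinear score per selected pair.
# A small eager sketch (assumed 4x4 shapes, not part of the generated graph):
def _pair_scores_reference(embed1, embed2, weight):
    return torch.diag(embed1 @ weight @ embed2.t())  # shape (4,)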
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (8, 1), (1, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, buf1, 128, grid=grid(128), stream=stream0)
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
triton_poi_fused_leaky_relu_1.run(primals_4, buf4, 16, grid=grid(16), stream=stream0)
del primals_4
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_5, out=buf9)
del primals_5
buf10 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf9, buf10, 128, grid=grid(128), stream=stream0)
buf11 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm]
extern_kernels.mm(buf10, primals_6, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf11, buf12, 16, grid=grid(16), stream=stream0)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_7, out=buf17)
del primals_7
buf18 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf17, buf18, 128, grid=grid(128), stream=stream0)
buf19 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.mm]
extern_kernels.mm(buf18, primals_8, out=buf19)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf19, buf20, 16, grid=grid(16), stream=stream0)
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_9, out=buf25)
del primals_9
buf26 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf25, buf26, 128, grid=grid(128), stream=stream0)
buf27 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.mm]
extern_kernels.mm(buf26, primals_10, out=buf27)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf27, buf28, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf22 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf30 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_2.run(buf4, buf3, buf2, buf12, buf11, buf20, buf19, buf28, buf27, buf5, buf6, buf13, buf14, buf21, buf22, buf29, buf30, 4, grid=grid(4), stream=stream0)
buf7 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse
buf15 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0); del buf11 # reuse
buf23 = reinterpret_tensor(buf19, (4, 4), (4, 1), 0); del buf19 # reuse
buf31 = reinterpret_tensor(buf27, (4, 4), (4, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_3.run(buf7, buf15, buf23, buf31, buf4, buf3, buf5, buf6, buf12, buf13, buf14, buf20, buf21, buf22, buf28, buf29, buf30, 16, grid=grid(16), stream=stream0)
del buf13
del buf14
del buf21
del buf22
del buf29
del buf30
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.mm]
extern_kernels.mm(buf7, buf0, out=buf8)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_1], Original ATen: [aten.mm]
extern_kernels.mm(buf15, buf9, out=buf16)
buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_2], Original ATen: [aten.mm]
extern_kernels.mm(buf23, buf17, out=buf24)
buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_3], Original ATen: [aten.mm]
extern_kernels.mm(buf31, buf25, out=buf32)
buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(buf8, buf16, buf24, buf32, buf33, 64, grid=grid(64), stream=stream0)
buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.mm]
extern_kernels.mm(buf33, primals_11, out=buf34)
buf35 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_5], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf34, buf35, 128, grid=grid(128), stream=stream0)
buf36 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.mm]
extern_kernels.mm(buf35, primals_12, out=buf36)
buf37 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_4], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf36, buf37, 16, grid=grid(16), stream=stream0)
buf38 = buf6; del buf6 # reuse
buf39 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [zero_vec, e_4, attention_12, attention_13], Original ATen: [aten.mul, aten.leaky_relu, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_5.run(buf4, buf37, buf36, buf38, buf39, 4, grid=grid(4), stream=stream0)
buf40 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0); del buf36 # reuse
# Topologically Sorted Source Nodes: [zero_vec, e_4, attention_12, attention_13], Original ATen: [aten.mul, aten.leaky_relu, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_6.run(buf40, buf4, buf37, buf38, buf39, 16, grid=grid(16), stream=stream0)
del buf38
buf41 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_4], Original ATen: [aten.mm]
extern_kernels.mm(buf40, buf34, out=buf41)
buf42 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embed1], Original ATen: [aten.zeros]
triton_poi_fused_zeros_7.run(buf42, 16, grid=grid(16), stream=stream0)
buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.elu]
triton_poi_fused_elu_8.run(buf41, buf43, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [embed1], Original ATen: [aten._sparse_addmm]
buf44 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf42, (4, 4), (1, 4), 0), reinterpret_tensor(buf43, (4, 4), (1, 4), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), beta=0)
buf45 = buf44
del buf44
# Topologically Sorted Source Nodes: [embed2], Original ATen: [aten._sparse_addmm]
buf46 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf42, (4, 4), (1, 4), 0), reinterpret_tensor(buf43, (4, 4), (1, 4), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), beta=0)
buf47 = buf46
del buf46
buf48 = buf43; del buf43 # reuse
# Topologically Sorted Source Nodes: [mm_5], Original ATen: [aten.mm]
extern_kernels.mm(primals_15, buf47, out=buf48)
buf49 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm_6], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf45, (4, 4), (1, 4), 0), buf48, out=buf49)
buf50 = reinterpret_tensor(buf39, (4, ), (1, ), 0); del buf39 # reuse
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.diagonal_copy]
triton_poi_fused_diagonal_copy_9.run(buf49, buf50, 4, grid=grid(4), stream=stream0)
del buf49
return (buf50, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20, buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41, buf42, buf48, buf45, reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), reinterpret_tensor(buf47, (4, 4), (1, 4), 0), primals_14, primals_13, reinterpret_tensor(buf34, (4, 4), (1, 4), 0), reinterpret_tensor(buf35, (8, 16), (1, 8), 0), reinterpret_tensor(primals_12, (1, 8), (1, 1), 0), reinterpret_tensor(buf33, (16, 4), (1, 16), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor(buf26, (8, 16), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor(buf18, (8, 16), (1, 8), 0), reinterpret_tensor(primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(buf10, (8, 16), (1, 8), 0), reinterpret_tensor(primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(buf1, (8, 16), (1, 8), 0), reinterpret_tensor(primals_3, (1, 8), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W)
N = h.size()[0]
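# Annotation: the next two lines build all N*N ordered pairs [h_i || h_j] — the
# first half repeats each row N times, the second half tiles the whole matrix N times.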
a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)
], dim=1).view(N, -1, 2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
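# Annotation: -9e15 acts as -inf, so softmax assigns (numerically) zero weight to non-edges.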
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GAT(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha, nheads):
"""Dense version of GAT."""
super(GAT, self).__init__()
self.dropout = dropout
self.xent = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([10]))
self.weight = nn.Parameter(torch.FloatTensor(nhid, nhid))
init.xavier_uniform_(self.weight, gain=1.414)
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(nhid * nheads, nhid, dropout=
dropout, alpha=alpha, concat=False)
def forward(self, x, adj, pair1_map, pair2_map):
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
embed1 = torch.sparse.mm(pair1_map, x)
embed2 = torch.sparse.mm(pair2_map, x)
scores = torch.diag(torch.mm(embed1, self.weight.mm(embed2.t())))
return scores
def loss(self, x, adj, labels, pair1_map, pair2_map):
scores = self.forward(x, adj, pair1_map, pair2_map)
return self.xent(scores, labels)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4,
'nheads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
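# Annotation (descriptive, not generated): cat_0 materializes the (N*N, 2*out_features)
# pair tensor a_input = [h_i || h_j] from a single (N, out_features) input h, matching
# the repeat/cat/view construction in GraphAttentionLayer.forward below.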
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + (4 * x1 + x0) %
16 % 4), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
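# Annotation: leaky_relu_1 stores the boolean mask x > 0. It is reused both as the
# LeakyReLU branch selector for the attention logits and, applied to adj, as the edge mask.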
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
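# Annotation: _softmax_leaky_relu_mul_where_2 handles all four heads in one pass. Per
# element it applies LeakyReLU (x if x > 0 else alpha*x, alpha = 4), masks non-edges with
# -8999999815811072.0 (-9e15 rounded to float32), then reduces each row to its max and
# sum-of-exp — the statistics of a numerically stable softmax.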
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp40 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp41 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp51 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp52 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp57 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp58 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp74 = tl.load(in_ptr5 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp75 = tl.load(in_ptr6 + 4 * x0, xmask, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr5 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp80 = tl.load(in_ptr6 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp85 = tl.load(in_ptr5 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp86 = tl.load(in_ptr6 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp91 = tl.load(in_ptr5 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp92 = tl.load(in_ptr6 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp108 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp109 = tl.load(in_ptr8 + 4 * x0, xmask, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp114 = tl.load(in_ptr8 + (1 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp119 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp120 = tl.load(in_ptr8 + (2 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp125 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp126 = tl.load(in_ptr8 + (3 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp42 = tmp41 * tmp3
tmp43 = tl.where(tmp40, tmp41, tmp42)
tmp44 = tl.where(tmp0, tmp43, tmp6)
tmp47 = tmp46 * tmp3
tmp48 = tl.where(tmp45, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp48, tmp6)
tmp50 = triton_helpers.maximum(tmp44, tmp49)
tmp53 = tmp52 * tmp3
tmp54 = tl.where(tmp51, tmp52, tmp53)
tmp55 = tl.where(tmp15, tmp54, tmp6)
tmp56 = triton_helpers.maximum(tmp50, tmp55)
tmp59 = tmp58 * tmp3
tmp60 = tl.where(tmp57, tmp58, tmp59)
tmp61 = tl.where(tmp22, tmp60, tmp6)
tmp62 = triton_helpers.maximum(tmp56, tmp61)
tmp63 = tmp44 - tmp62
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp49 - tmp62
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp55 - tmp62
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tmp71 = tmp61 - tmp62
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp70 + tmp72
tmp76 = tmp75 * tmp3
tmp77 = tl.where(tmp74, tmp75, tmp76)
tmp78 = tl.where(tmp0, tmp77, tmp6)
tmp81 = tmp80 * tmp3
tmp82 = tl.where(tmp79, tmp80, tmp81)
tmp83 = tl.where(tmp8, tmp82, tmp6)
tmp84 = triton_helpers.maximum(tmp78, tmp83)
tmp87 = tmp86 * tmp3
tmp88 = tl.where(tmp85, tmp86, tmp87)
tmp89 = tl.where(tmp15, tmp88, tmp6)
tmp90 = triton_helpers.maximum(tmp84, tmp89)
tmp93 = tmp92 * tmp3
tmp94 = tl.where(tmp91, tmp92, tmp93)
tmp95 = tl.where(tmp22, tmp94, tmp6)
tmp96 = triton_helpers.maximum(tmp90, tmp95)
tmp97 = tmp78 - tmp96
tmp98 = tl_math.exp(tmp97)
tmp99 = tmp83 - tmp96
tmp100 = tl_math.exp(tmp99)
tmp101 = tmp98 + tmp100
tmp102 = tmp89 - tmp96
tmp103 = tl_math.exp(tmp102)
tmp104 = tmp101 + tmp103
tmp105 = tmp95 - tmp96
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp104 + tmp106
tmp110 = tmp109 * tmp3
tmp111 = tl.where(tmp108, tmp109, tmp110)
tmp112 = tl.where(tmp0, tmp111, tmp6)
tmp115 = tmp114 * tmp3
tmp116 = tl.where(tmp113, tmp114, tmp115)
tmp117 = tl.where(tmp8, tmp116, tmp6)
tmp118 = triton_helpers.maximum(tmp112, tmp117)
tmp121 = tmp120 * tmp3
tmp122 = tl.where(tmp119, tmp120, tmp121)
tmp123 = tl.where(tmp15, tmp122, tmp6)
tmp124 = triton_helpers.maximum(tmp118, tmp123)
tmp127 = tmp126 * tmp3
tmp128 = tl.where(tmp125, tmp126, tmp127)
tmp129 = tl.where(tmp22, tmp128, tmp6)
tmp130 = triton_helpers.maximum(tmp124, tmp129)
tmp131 = tmp112 - tmp130
tmp132 = tl_math.exp(tmp131)
tmp133 = tmp117 - tmp130
tmp134 = tl_math.exp(tmp133)
tmp135 = tmp132 + tmp134
tmp136 = tmp123 - tmp130
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp139 = tmp129 - tmp130
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp39, xmask)
tl.store(out_ptr2 + x0, tmp62, xmask)
tl.store(out_ptr3 + x0, tmp73, xmask)
tl.store(out_ptr4 + x0, tmp96, xmask)
tl.store(out_ptr5 + x0, tmp107, xmask)
tl.store(out_ptr6 + x0, tmp130, xmask)
tl.store(out_ptr7 + x0, tmp141, xmask)
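# Annotation: _softmax_leaky_relu_mul_where_3 finishes the softmax for the four heads in
# place, computing exp(e - rowmax) / rowsum from the statistics produced by the kernel above.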
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
in_ptr11, in_ptr12, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + x2, xmask).to(tl.int1)
tmp14 = tl.load(in_out_ptr1 + x2, xmask)
tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr7 + x2, xmask).to(tl.int1)
tmp24 = tl.load(in_out_ptr2 + x2, xmask)
tmp28 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr10 + x2, xmask).to(tl.int1)
tmp34 = tl.load(in_out_ptr3 + x2, xmask)
tmp38 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = tmp14 * tmp3
tmp16 = tl.where(tmp13, tmp14, tmp15)
tmp17 = tl.where(tmp0, tmp16, tmp6)
tmp19 = tmp17 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp0, tmp26, tmp6)
tmp29 = tmp27 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp30 / tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp34, tmp35)
tmp37 = tl.where(tmp0, tmp36, tmp6)
tmp39 = tmp37 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp42 = tmp40 / tmp41
tl.store(in_out_ptr0 + x2, tmp12, xmask)
tl.store(in_out_ptr1 + x2, tmp22, xmask)
tl.store(in_out_ptr2 + x2, tmp32, xmask)
tl.store(in_out_ptr3 + x2, tmp42, xmask)
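# Annotation: cat_4 applies ELU (x if x > 0 else expm1(x)) to each head's h_prime and
# concatenates the four results along the feature dimension into a (4, 16) tensor.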
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tl.full([1], 16, tl.int64)
tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + x2, tmp52, xmask)
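# Annotation: _softmax_leaky_relu_mul_where_5 is the same rowwise max / sum-of-exp
# reduction as kernel 2, but for the single output-attention head.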
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_5(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp39, xmask)
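# Annotation: _softmax_leaky_relu_mul_where_6 performs the in-place softmax
# normalization for the output-attention head.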
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_6(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tl.store(in_out_ptr0 + x2, tmp12, xmask)
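# Annotation: zeros_7 fills a (4, 4) buffer with zeros; it is passed as the `self`
# argument of _sparse_addmm with beta=0, expressing torch.sparse.mm as an addmm.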
@triton.jit
def triton_poi_fused_zeros_7(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
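# Annotation: elu_8 is an elementwise ELU (x if x > 0 else expm1(x)) applied to the
# output-attention result.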
@triton.jit
def triton_poi_fused_elu_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
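# Annotation: diagonal_copy_9 copies the diagonal of a contiguous (4, 4) matrix;
# element (i, i) sits at flat offset 5*i for strides (4, 1).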
@triton.jit
def triton_poi_fused_diagonal_copy_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
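# Annotation: call() replays GAT.forward — four attention heads over the input,
# feature-wise concat, one output head with ELU, the two sparse pair-map projections
# via _sparse_addmm, the bilinear score diag(embed1 @ W @ embed2.T), and finally the
# diagonal extraction.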
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (8, 1), (1, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](buf0, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](primals_4, buf4, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_4
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_5, out=buf9)
del primals_5
buf10 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf9, buf10, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf10, primals_6, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf11, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_7, out=buf17)
del primals_7
buf18 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf17, buf18, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf19 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf18, primals_8, out=buf19)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf19, buf20, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_9, out=buf25)
del primals_9
buf26 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf25, buf26, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf27 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf26, primals_10, out=buf27)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf27, buf28, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf22 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf30 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused__softmax_leaky_relu_mul_where_2[grid(4)](buf4,
buf3, buf2, buf12, buf11, buf20, buf19, buf28, buf27, buf5,
buf6, buf13, buf14, buf21, buf22, buf29, buf30, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
buf15 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0)
del buf11
buf23 = reinterpret_tensor(buf19, (4, 4), (4, 1), 0)
del buf19
buf31 = reinterpret_tensor(buf27, (4, 4), (4, 1), 0)
del buf27
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(16)](buf7,
buf15, buf23, buf31, buf4, buf3, buf5, buf6, buf12, buf13,
buf14, buf20, buf21, buf22, buf28, buf29, buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del buf21
del buf22
del buf29
del buf30
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf7, buf0, out=buf8)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf15, buf9, out=buf16)
buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf23, buf17, out=buf24)
buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf31, buf25, out=buf32)
buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_cat_4[grid(64)](buf8, buf16, buf24, buf32, buf33,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf33, primals_11, out=buf34)
buf35 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf34, buf35, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf36 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf35, primals_12, out=buf36)
buf37 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf36, buf37, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf38 = buf6
del buf6
buf39 = buf5
del buf5
triton_poi_fused__softmax_leaky_relu_mul_where_5[grid(4)](buf4,
buf37, buf36, buf38, buf39, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf40 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0)
del buf36
triton_poi_fused__softmax_leaky_relu_mul_where_6[grid(16)](buf40,
buf4, buf37, buf38, buf39, 16, XBLOCK=16, num_warps=1, num_stages=1
)
del buf38
buf41 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf40, buf34, out=buf41)
buf42 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_zeros_7[grid(16)](buf42, 16, XBLOCK=16, num_warps=
1, num_stages=1)
buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_elu_8[grid(16)](buf41, buf43, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf44 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(
buf42, (4, 4), (1, 4), 0), reinterpret_tensor(buf43, (4, 4), (1,
4), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), beta=0)
buf45 = buf44
del buf44
buf46 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(
buf42, (4, 4), (1, 4), 0), reinterpret_tensor(buf43, (4, 4), (1,
4), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), beta=0)
buf47 = buf46
del buf46
buf48 = buf43
del buf43
extern_kernels.mm(primals_15, buf47, out=buf48)
buf49 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf45, (4, 4), (1, 4), 0),
buf48, out=buf49)
buf50 = reinterpret_tensor(buf39, (4,), (1,), 0)
del buf39
triton_poi_fused_diagonal_copy_9[grid(4)](buf49, buf50, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del buf49
return (buf50, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20,
buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41, buf42,
buf48, buf45, reinterpret_tensor(primals_15, (4, 4), (1, 4), 0),
reinterpret_tensor(buf47, (4, 4), (1, 4), 0), primals_14,
primals_13, reinterpret_tensor(buf34, (4, 4), (1, 4), 0),
reinterpret_tensor(buf35, (8, 16), (1, 8), 0), reinterpret_tensor(
primals_12, (1, 8), (1, 1), 0), reinterpret_tensor(buf33, (16, 4),
(1, 16), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0),
reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor(
buf26, (8, 16), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8),
(1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor(
buf18, (8, 16), (1, 8), 0), reinterpret_tensor(primals_8, (1, 8), (
1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0),
reinterpret_tensor(buf10, (8, 16), (1, 8), 0), reinterpret_tensor(
primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1,
4), 0), reinterpret_tensor(buf1, (8, 16), (1, 8), 0),
reinterpret_tensor(primals_3, (1, 8), (1, 1), 0))
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W)
N = h.size()[0]
a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)
], dim=1).view(N, -1, 2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GATNew(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha, nheads):
"""Dense version of GAT."""
super(GATNew, self).__init__()
self.dropout = dropout
self.xent = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([10]))
self.weight = nn.Parameter(torch.FloatTensor(nhid, nhid))
init.xavier_uniform_(self.weight, gain=1.414)
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(nhid * nheads, nhid, dropout=
dropout, alpha=alpha, concat=False)
def loss(self, x, adj, labels, pair1_map, pair2_map):
scores = self.forward(x, adj, pair1_map, pair2_map)
return self.xent(scores, labels)
def forward(self, input_0, input_1, input_2, input_3):
# Map parameters and inputs to the primal order consumed by call() above:
# primals_1 is the node features, primals_4 the adjacency, primals_13/14 the
# pair maps, and primals_15 the bilinear scoring weight.
primals_1 = input_0
primals_2 = self.attention_0.W
primals_3 = self.attention_0.a
primals_4 = input_1
primals_5 = self.attention_1.W
primals_6 = self.attention_1.a
primals_7 = self.attention_2.W
primals_8 = self.attention_2.a
primals_9 = self.attention_3.W
primals_10 = self.attention_3.a
primals_11 = self.out_att.W
primals_12 = self.out_att.a
primals_13 = input_2
primals_14 = input_3
primals_15 = self.weight
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| HecatePhy/directed_graphsage | GAT | false | 17,394 | [
"MIT"
] | 6 | 0e35f8971d44b8b3477fd7339225e1a69da4456a | https://github.com/HecatePhy/directed_graphsage/tree/0e35f8971d44b8b3477fd7339225e1a69da4456a |
GridReduction1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
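# Annotation (descriptive, not generated): the first four kernels below are pure layout
# shuffles — they repack the convolution weights and the input into channels-last strides
# before the extern convolution kernels are invoked.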
# kernel path: runs/run_shard_2/inductor_cache/fv/cfvopgvqqsx4alkdzw5talfksoztd2gyjucj6hqic62vu7iebsop.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1536
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (36*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/62/c62myuf63oepurbxeoz2olrr5dwoipydpve4ekfnunp6gerrikor.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wl/cwlyygqmoh2tr4odo2ftoacxoqlew4n7mxufvlgaa5sgibpvlr5s.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 6144
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4x/c4xle4aqy37wz4bwzpqato3vjq4m7xf6qhielbefbj5qmpw3ynhc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 9216
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 96
y1 = (yindex // 96)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (96*x2) + (864*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/fi/cfip7afiyerclq5vo6yxmd276ng2jbglv4ga6rioinbuofpslqci.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/77/c77uz36zxakgczhvmtzhbsfq6ju6nysfitw3rsos5757frtbc7my.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_4 => convolution_2
# x_5 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/tk/ctknqv26cakgnor7fua6jzjrtfg6txncirdcocxcujultt6x6jwz.py
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# branch_pool => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%primals_3, [3, 3], [2, 2], [0, 0], [1, 1], False), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp13 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + (x0 + (484*x1)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ra/crafg3cuwudhlkmyo3srxetfcbkisbxkwrhay7kz6wzna5b3zzol.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 384
x1 = (xindex // 384)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + (484*x1)), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6n/c6njjxvp2sdv4pgjpx2rkseqiwb4hsodm2hqmext63cgjiaxu6nf.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_6 => convolution_3
# x_7 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 96
x1 = (xindex // 96)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + (484*x1)), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (384, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (384, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (96, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (96, ), (1, ))
assert_size_stride(primals_8, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_9, (96, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((384, 4, 3, 3), (36, 1, 12, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 1536, 9, grid=grid(1536, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((96, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_6, buf2, 6144, 9, grid=grid(6144, 9), stream=stream0)
del primals_6
buf3 = empty_strided_cuda((96, 96, 3, 3), (864, 1, 288, 96), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_8, buf3, 9216, 9, grid=grid(9216, 9), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 384, 1, 1), (384, 1, 384, 384))
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 64, 4, 4), (1024, 1, 256, 64))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf6, primals_5, 4096, grid=grid(4096), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 96, 4, 4), (1536, 1, 384, 96))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf8, primals_7, 6144, grid=grid(6144), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 96, 1, 1), (96, 1, 96, 96))
buf13 = empty_strided_cuda((4, 484, 1, 1), (484, 1, 1, 1), torch.float32)
buf10 = reinterpret_tensor(buf13, (4, 4, 1, 1), (484, 1, 1, 1), 480) # alias
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_6.run(buf1, buf10, 16, grid=grid(16), stream=stream0)
buf11 = reinterpret_tensor(buf13, (4, 384, 1, 1), (484, 1, 1, 1), 0) # alias
buf15 = empty_strided_cuda((4, 384, 1, 1), (384, 1, 384, 384), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf4, primals_2, buf11, buf15, 1536, grid=grid(1536), stream=stream0)
del buf4
del primals_2
buf12 = reinterpret_tensor(buf13, (4, 96, 1, 1), (484, 1, 1, 1), 384) # alias
buf14 = empty_strided_cuda((4, 96, 1, 1), (96, 1, 96, 96), torch.bool)
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf9, primals_9, buf12, buf14, 384, grid=grid(384), stream=stream0)
del buf9
del primals_9
return (buf13, buf0, buf1, primals_4, buf2, buf3, buf6, buf8, buf14, buf15, )
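# Annotation (not generator output): buf13, the concatenated forward output,
# comes first in the tuple above; the remaining tensors (channels-last weight
# copies buf0/buf2/buf3, intermediate activations buf6/buf8, and the boolean
# ReLU masks buf14/buf15) are kept because the backward pass needs them.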
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((384, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((96, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
import torch.nn as nn
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class GridReduction1(nn.Module):
def __init__(self, in_channels):
super(GridReduction1, self).__init__()
self.branch3x3 = Conv2d(in_channels, 384, 3, stride=2)
self.branch3x3dbl_1 = Conv2d(in_channels, 64, 1)
self.branch3x3dbl_2 = Conv2d(64, 96, 3, padding=1)
self.branch3x3dbl_3 = Conv2d(96, 96, 3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
branches = [branch3x3, branch3x3dbl, branch_pool]
outputs = torch.cat(branches, 1)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
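# Minimal smoke test (an assumption added here, not part of the original repo):
# both stride-2 3x3 branches and the stride-2 max pool collapse the 4x4 input
# to 1x1, so the concatenated output has 384 + 96 + 4 = 484 channels.
if __name__ == "__main__":
    model = GridReduction1(in_channels=4)
    out = model(get_inputs()[0])
    assert out.shape == (4, 484, 1, 1), out.shape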
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 1536
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 96
y1 = yindex // 96
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 96 * x2 + 864 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + (x0 + 484 * x1), tmp16, xmask)
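# Annotation (not generator output): the nine loads above walk the 3x3 max-pool
# window over the channels-last (4, 4, 4, 4) input; with stride 2 only one
# output position is valid per image, and the result lands at channel offset
# 480 of the 484-channel concat buffer via the x0 + 484 * x1 store.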
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 384
x1 = xindex // 384
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + 484 * x1), tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 96
x1 = xindex // 96
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + 484 * x1), tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (384, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (384,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (96, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (96,), (1,))
assert_size_stride(primals_8, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_9, (96,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((384, 4, 3, 3), (36, 1, 12, 4), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(1536, 9)](primals_1, buf0, 1536, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((96, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(6144, 9)](primals_6, buf2, 6144, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf3 = empty_strided_cuda((96, 96, 3, 3), (864, 1, 288, 96), torch.
float32)
triton_poi_fused_3[grid(9216, 9)](primals_8, buf3, 9216, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf4 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 384, 1, 1), (384, 1, 384, 384))
buf5 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 64, 4, 4), (1024, 1, 256, 64))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_4[grid(4096)](buf6, primals_5,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf7 = extern_kernels.convolution(buf6, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 96, 4, 4), (1536, 1, 384, 96))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_5[grid(6144)](buf8, primals_7,
6144, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 96, 1, 1), (96, 1, 96, 96))
buf13 = empty_strided_cuda((4, 484, 1, 1), (484, 1, 1, 1), torch.
float32)
buf10 = reinterpret_tensor(buf13, (4, 4, 1, 1), (484, 1, 1, 1), 480)
triton_poi_fused_max_pool2d_with_indices_6[grid(16)](buf1, buf10,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf13, (4, 384, 1, 1), (484, 1, 1, 1), 0)
buf15 = empty_strided_cuda((4, 384, 1, 1), (384, 1, 384, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(1536)](buf4
, primals_2, buf11, buf15, 1536, XBLOCK=128, num_warps=4,
num_stages=1)
del buf4
del primals_2
buf12 = reinterpret_tensor(buf13, (4, 96, 1, 1), (484, 1, 1, 1), 384)
buf14 = empty_strided_cuda((4, 96, 1, 1), (96, 1, 96, 96), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(384)](buf9,
primals_9, buf12, buf14, 384, XBLOCK=256, num_warps=4, num_stages=1
)
del buf9
del primals_9
return buf13, buf0, buf1, primals_4, buf2, buf3, buf6, buf8, buf14, buf15
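# Annotation (not generator output): buf10, buf11 and buf12 are
# reinterpret_tensor views over disjoint channel ranges of buf13 (offsets 480,
# 0 and 384), so the three branch kernels assemble the torch.cat result in
# place rather than copying into a fresh buffer afterwards.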
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class GridReduction1New(nn.Module):
def __init__(self, in_channels):
super(GridReduction1New, self).__init__()
self.branch3x3 = Conv2d(in_channels, 384, 3, stride=2)
self.branch3x3dbl_1 = Conv2d(in_channels, 64, 1)
self.branch3x3dbl_2 = Conv2d(64, 96, 3, padding=1)
self.branch3x3dbl_3 = Conv2d(96, 96, 3, stride=2)
def forward(self, input_0):
primals_1 = self.branch3x3.conv.weight
primals_2 = self.branch3x3.conv.bias
primals_4 = self.branch3x3dbl_1.conv.weight
primals_5 = self.branch3x3dbl_1.conv.bias
primals_6 = self.branch3x3dbl_2.conv.weight
primals_7 = self.branch3x3dbl_2.conv.bias
primals_8 = self.branch3x3dbl_3.conv.weight
primals_9 = self.branch3x3dbl_3.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
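# Hypothetical usage sketch (an assumption, mirroring get_inputs above): the
# compiled wrapper is intended as a drop-in replacement for the eager
# GridReduction1 on CUDA inputs. Left as comments since it requires a GPU:
#
#     model = GridReduction1New(in_channels=4).cuda()
#     out = model(torch.rand([4, 4, 4, 4], device='cuda'))
#     assert out.shape == (4, 484, 1, 1)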
| Hiroaki-Ozaki/modelib-classification | GridReduction1 | false | 17,395 | [
"WTFPL"
] | 10 | 11077704cc0bc9a42fc4b94da60b57d31ff0f65c | https://github.com/Hiroaki-Ozaki/modelib-classification/tree/11077704cc0bc9a42fc4b94da60b57d31ff0f65c |
InceptionB | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/fb/cfbyh7temr2u5cam5txbekh4n77qsqs7bwxugnr6jx7oljt7yy43.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/b2/cb2iqpbhi4ilwmrunmola4emmia32ze2nkmsl5hxw2dksx63axat.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 8], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (7*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (28*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6d/c6deqxpolyaesoeupqqhn2naovfoydijxbot7rblbiimk2xivdqo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 8], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 768
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (7*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (28*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xt/cxtkhyn4yl7wgyx5o6lijvllbkjit4vpcisoj6fqzzj4wcyafgdc.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/z6/cz6bovv7vxfcwgl4lygnguol6po7jhcqrjtnrsk575mlt56euylf.py
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# branch_pool => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_3, [3, 3], [1, 1], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_4 = async_compile.triton('triton_poi_fused_avg_pool2d_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 4
x1 = (xindex // 4) % 4
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-20) + x6), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-16) + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-12) + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-4) + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x6), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (4 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (12 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (16 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (20 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + ((-1)*x1) + ((-1)*x2) + (x1*x2) + (((5) * ((5) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (5)))*((5) * ((5) <= (2 + x2)) + (2 + x2) * ((2 + x2) < (5)))) + ((-1)*x1*((5) * ((5) <= (2 + x2)) + (2 + x2) * ((2 + x2) < (5)))) + ((-1)*x2*((5) * ((5) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (5)))) + ((5) * ((5) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (5))) + ((5) * ((5) <= (2 + x2)) + (2 + x2) * ((2 + x2) < (5)))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + (x6), tmp53, xmask)
''', device_str='cuda')
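# Note (added annotation): tmp52 is the clamped-window element count for the
# 3x3 / stride-1 / padding-1 average pool. For this 4x4 input the expression
# appears to evaluate to 9 at every output position, consistent with
# F.avg_pool2d's default count_include_pad=True divisor.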
# kernel path: runs/run_shard_2/inductor_cache/lt/clt3edytkmbhgzghc3ywzihvbrnnaabsicixnmda4e75v4ko3545.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# outputs => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_3, %relu_8, %relu_9], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 768
x0 = xindex % 16
x2 = (xindex // 12288)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 192, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((192*x0) + (3072*x2) + x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 384, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + ((192*x0) + (3072*x2) + ((-192) + x1)), tmp15, eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr3 + ((-192) + x1), tmp15, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 576, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr4 + ((192*x0) + (3072*x2) + ((-384) + x1)), tmp25, eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr5 + ((-384) + x1), tmp25, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp8, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tmp33 = tl.full([1], 768, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tl.load(in_ptr6 + ((192*x0) + (3072*x2) + ((-576) + x1)), tmp32, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr7 + ((-576) + x1), tmp32, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp8, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + (x3), tmp43, None)
''', device_str='cuda')
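# Note (added annotation): this kernel materializes torch.cat over the four
# 192-channel branches by dispatching on the channel index x1 (ranges
# [0, 192), [192, 384), [384, 576), [576, 768)), fusing each branch's bias add
# and ReLU into the gather instead of emitting four separate copy kernels.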
# kernel path: runs/run_shard_2/inductor_cache/cq/ccqehy2co4twzcnym4exummc4m52cnryfshf3nb6futlsmi3fhyw.py
# Topologically Sorted Source Nodes: [x_18, x_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_18 => convolution_9
# x_19 => relu_9
# Graph fragment:
# %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_9, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
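# Note (added annotation): unlike the fused kernels above, this one only
# re-derives the ReLU-backward mask (post-activation <= 0) from the raw
# convolution output plus bias; the call function below reuses it for all four
# 192-channel branch outputs (buf23, buf21, buf12, buf7).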
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args
args.clear()
assert_size_stride(primals_1, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (192, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 7), (28, 7, 7, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (192, 4, 7, 1), (28, 7, 1, 1))
assert_size_stride(primals_9, (192, ), (1, ))
assert_size_stride(primals_10, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4, 7, 1), (28, 7, 1, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4, 1, 7), (28, 7, 7, 1))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, 4, 7, 1), (28, 7, 1, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (192, 4, 1, 7), (28, 7, 7, 1))
assert_size_stride(primals_19, (192, ), (1, ))
assert_size_stride(primals_20, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_21, (192, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((4, 4, 1, 7), (28, 1, 28, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 16, 7, grid=grid(16, 7), stream=stream0)
del primals_6
buf2 = empty_strided_cuda((192, 4, 7, 1), (28, 1, 4, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_8, buf2, 768, 7, grid=grid(768, 7), stream=stream0)
del primals_8
buf3 = empty_strided_cuda((4, 4, 7, 1), (28, 1, 4, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_12, buf3, 16, 7, grid=grid(16, 7), stream=stream0)
del primals_12
buf4 = empty_strided_cuda((4, 4, 1, 7), (28, 1, 28, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_14, buf4, 16, 7, grid=grid(16, 7), stream=stream0)
del primals_14
buf5 = empty_strided_cuda((4, 4, 7, 1), (28, 1, 4, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_16, buf5, 16, 7, grid=grid(16, 7), stream=stream0)
del primals_16
buf6 = empty_strided_cuda((192, 4, 1, 7), (28, 1, 28, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_18, buf6, 768, 7, grid=grid(768, 7), stream=stream0)
del primals_18
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 192, 4, 4), (3072, 1, 768, 192))
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 1, 16, 4))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf9, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, buf1, stride=(1, 1), padding=(0, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf11, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, buf2, stride=(1, 1), padding=(3, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 192, 4, 4), (3072, 1, 768, 192))
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf0, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4, 4), (64, 1, 16, 4))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf14, primals_11, 256, grid=grid(256), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, buf3, stride=(1, 1), padding=(3, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 4, 4), (64, 1, 16, 4))
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf16, primals_13, 256, grid=grid(256), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf16, buf4, stride=(1, 1), padding=(0, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 4, 4, 4), (64, 1, 16, 4))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf18, primals_15, 256, grid=grid(256), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, buf5, stride=(1, 1), padding=(3, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 4, 4, 4), (64, 1, 16, 4))
buf20 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf20, primals_17, 256, grid=grid(256), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, buf6, stride=(1, 1), padding=(0, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 192, 4, 4), (3072, 1, 768, 192))
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_4.run(buf0, buf22, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf22, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 192, 4, 4), (3072, 1, 768, 192))
buf24 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf7, primals_2, buf12, primals_9, buf21, primals_19, buf23, primals_21, buf24, 49152, grid=grid(49152), stream=stream0)
buf25 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192), torch.bool)
# Topologically Sorted Source Nodes: [x_18, x_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_6.run(buf23, primals_21, buf25, 12288, grid=grid(12288), stream=stream0)
del buf23
del primals_21
buf26 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192), torch.bool)
# Topologically Sorted Source Nodes: [x_16, x_17], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_6.run(buf21, primals_19, buf26, 12288, grid=grid(12288), stream=stream0)
del buf21
del primals_19
buf27 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192), torch.bool)
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_6.run(buf12, primals_9, buf27, 12288, grid=grid(12288), stream=stream0)
del buf12
del primals_9
buf28 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_6.run(buf7, primals_2, buf28, 12288, grid=grid(12288), stream=stream0)
del buf7
del primals_2
return (buf24, primals_1, buf0, primals_4, buf1, buf2, primals_10, buf3, buf4, buf5, buf6, primals_20, buf9, buf11, buf14, buf16, buf18, buf20, buf22, buf25, buf26, buf27, buf28, )
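# Annotation (not generator output): buf24 is the (4, 768, 4, 4) concatenated
# forward output; everything after it in the tuple is saved for backward,
# including the channels-last weight copies buf1..buf6 and the boolean ReLU
# masks buf25..buf28 produced by the threshold_backward kernel.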
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((192, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 7), (28, 7, 7, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((192, 4, 7, 1), (28, 7, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 7, 1), (28, 7, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4, 1, 7), (28, 7, 7, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4, 7, 1), (28, 7, 1, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((192, 4, 1, 7), (28, 7, 7, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((192, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
import torch.nn as nn
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class InceptionB(nn.Module):
def __init__(self, in_channels, channels_7x7):
super(InceptionB, self).__init__()
self.branch1x1 = Conv2d(in_channels, 192, 1)
c7 = channels_7x7
self.branch7x7_1 = Conv2d(in_channels, c7, 1)
self.branch7x7_2 = Conv2d(c7, c7, (1, 7), padding=(0, 3))
self.branch7x7_3 = Conv2d(c7, 192, (7, 1), padding=(3, 0))
self.branch7x7dbl_1 = Conv2d(in_channels, c7, 1)
self.branch7x7dbl_2 = Conv2d(c7, c7, (7, 1), padding=(3, 0))
self.branch7x7dbl_3 = Conv2d(c7, c7, (1, 7), padding=(0, 3))
self.branch7x7dbl_4 = Conv2d(c7, c7, (7, 1), padding=(3, 0))
self.branch7x7dbl_5 = Conv2d(c7, 192, (1, 7), padding=(0, 3))
self.branch_pool = Conv2d(in_channels, 192, 1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
branches = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
outputs = torch.cat(branches, 1)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'channels_7x7': 4}]
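# Minimal smoke test (an assumption added here, not part of the original repo):
# every branch preserves the 4x4 spatial size (1x1 convs, padded 1x7/7x1 convs,
# and the stride-1 padded average pool), so the output stacks 4 * 192 channels.
if __name__ == "__main__":
    model = InceptionB(in_channels=4, channels_7x7=4)
    out = model(get_inputs()[0])
    assert out.shape == (4, 768, 4, 4), out.shape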
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 7 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 28 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 768
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 7 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 28 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 4
x1 = xindex // 4 % 4
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-20 + x6), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-16 + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-12 + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-4 + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (4 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (12 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (16 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (20 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
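    # tmp52 below is the pooling-window element count used as the divisor: the
    # expanded product (min(2 + x1, 5) - (x1 - 1)) * (min(2 + x2, 5) - (x2 - 1)),
    # where the `5 * (5 <= v) + v * (v < 5)` idiom the compiler emits is min(v, 5).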
tmp52 = 1 + -1 * x1 + -1 * x2 + x1 * x2 + (5 * (5 <= 2 + x1) + (2 + x1) *
(2 + x1 < 5)) * (5 * (5 <= 2 + x2) + (2 + x2) * (2 + x2 < 5)
) + -1 * x1 * (5 * (5 <= 2 + x2) + (2 + x2) * (2 + x2 < 5)
) + -1 * x2 * (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5)) + (5 *
(5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5)) + (5 * (5 <= 2 + x2) + (2 +
x2) * (2 + x2 < 5))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x6, tmp53, xmask)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 768
x0 = xindex % 16
x2 = xindex // 12288
x3 = xindex
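    # Fused torch.cat along dim 1 of the four 192-channel branch outputs: each
    # 192-wide slice of x1 selects one branch (in_ptr0/2/4/6), with the conv
    # bias add (in_ptr1/3/5/7) and ReLU folded in before the copy.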
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 192, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (192 * x0 + 3072 * x2 + x1), tmp4,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 384, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + (192 * x0 + 3072 * x2 + (-192 + x1)), tmp15,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr3 + (-192 + x1), tmp15, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 576, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr4 + (192 * x0 + 3072 * x2 + (-384 + x1)), tmp25,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr5 + (-384 + x1), tmp25, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp8, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tl.full([1], 768, tl.int64)
tmp35 = tl.load(in_ptr6 + (192 * x0 + 3072 * x2 + (-576 + x1)), tmp32,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr7 + (-576 + x1), tmp32, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp8, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + x3, tmp43, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
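    # Recomputes relu(conv + bias) and stores the boolean mask (output <= 0)
    # that threshold_backward uses to zero gradients in the ReLU backward pass.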
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (192,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 7), (28, 7, 7, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (192, 4, 7, 1), (28, 7, 1, 1))
assert_size_stride(primals_9, (192,), (1,))
assert_size_stride(primals_10, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 7, 1), (28, 7, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4, 1, 7), (28, 7, 7, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4, 7, 1), (28, 7, 1, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (192, 4, 1, 7), (28, 7, 7, 1))
assert_size_stride(primals_19, (192,), (1,))
assert_size_stride(primals_20, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_21, (192,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((4, 4, 1, 7), (28, 1, 28, 4), torch.float32)
triton_poi_fused_1[grid(16, 7)](primals_6, buf1, 16, 7, XBLOCK=8,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((192, 4, 7, 1), (28, 1, 4, 4), torch.float32)
triton_poi_fused_2[grid(768, 7)](primals_8, buf2, 768, 7, XBLOCK=8,
YBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((4, 4, 7, 1), (28, 1, 4, 4), torch.float32)
triton_poi_fused_1[grid(16, 7)](primals_12, buf3, 16, 7, XBLOCK=8,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_12
buf4 = empty_strided_cuda((4, 4, 1, 7), (28, 1, 28, 4), torch.float32)
triton_poi_fused_1[grid(16, 7)](primals_14, buf4, 16, 7, XBLOCK=8,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_14
buf5 = empty_strided_cuda((4, 4, 7, 1), (28, 1, 4, 4), torch.float32)
triton_poi_fused_1[grid(16, 7)](primals_16, buf5, 16, 7, XBLOCK=8,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_16
buf6 = empty_strided_cuda((192, 4, 1, 7), (28, 1, 28, 4), torch.float32
)
triton_poi_fused_2[grid(768, 7)](primals_18, buf6, 768, 7, XBLOCK=8,
YBLOCK=128, num_warps=4, num_stages=1)
del primals_18
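        # The four Inception branches: buf7 = branch1x1, buf12 = branch7x7
        # (1x1 -> 1x7 -> 7x1), buf21 = branch7x7dbl, buf23 = branch_pool
        # (3x3 avg pool -> 1x1); triton_poi_fused_cat_5 concatenates them
        # into buf24.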
buf7 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 192, 4, 4), (3072, 1, 768, 192))
buf8 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 1, 16, 4))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_3[grid(256)](buf9, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf10 = extern_kernels.convolution(buf9, buf1, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_3[grid(256)](buf11, primals_7,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, buf2, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 192, 4, 4), (3072, 1, 768, 192))
buf13 = extern_kernels.convolution(buf0, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4, 4), (64, 1, 16, 4))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_3[grid(256)](buf14, primals_11,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf15 = extern_kernels.convolution(buf14, buf3, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 4, 4), (64, 1, 16, 4))
buf16 = buf15
del buf15
triton_poi_fused_convolution_relu_3[grid(256)](buf16, primals_13,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf17 = extern_kernels.convolution(buf16, buf4, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 4, 4, 4), (64, 1, 16, 4))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_3[grid(256)](buf18, primals_15,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_15
buf19 = extern_kernels.convolution(buf18, buf5, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 4, 4, 4), (64, 1, 16, 4))
buf20 = buf19
del buf19
triton_poi_fused_convolution_relu_3[grid(256)](buf20, primals_17,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_17
buf21 = extern_kernels.convolution(buf20, buf6, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 192, 4, 4), (3072, 1, 768, 192))
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_avg_pool2d_4[grid(256)](buf0, buf22, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf23 = extern_kernels.convolution(buf22, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 192, 4, 4), (3072, 1, 768, 192))
buf24 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch
.float32)
triton_poi_fused_cat_5[grid(49152)](buf7, primals_2, buf12,
primals_9, buf21, primals_19, buf23, primals_21, buf24, 49152,
XBLOCK=512, num_warps=4, num_stages=1)
buf25 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(12288)](
buf23, primals_21, buf25, 12288, XBLOCK=256, num_warps=4,
num_stages=1)
del buf23
del primals_21
buf26 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(12288)](
buf21, primals_19, buf26, 12288, XBLOCK=256, num_warps=4,
num_stages=1)
del buf21
del primals_19
buf27 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(12288)](
buf12, primals_9, buf27, 12288, XBLOCK=256, num_warps=4,
num_stages=1)
del buf12
del primals_9
buf28 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(12288)](
buf7, primals_2, buf28, 12288, XBLOCK=256, num_warps=4,
num_stages=1)
del buf7
del primals_2
return (buf24, primals_1, buf0, primals_4, buf1, buf2, primals_10, buf3,
buf4, buf5, buf6, primals_20, buf9, buf11, buf14, buf16, buf18,
buf20, buf22, buf25, buf26, buf27, buf28)
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class InceptionBNew(nn.Module):
def __init__(self, in_channels, channels_7x7):
super(InceptionBNew, self).__init__()
self.branch1x1 = Conv2d(in_channels, 192, 1)
c7 = channels_7x7
self.branch7x7_1 = Conv2d(in_channels, c7, 1)
self.branch7x7_2 = Conv2d(c7, c7, (1, 7), padding=(0, 3))
self.branch7x7_3 = Conv2d(c7, 192, (7, 1), padding=(3, 0))
self.branch7x7dbl_1 = Conv2d(in_channels, c7, 1)
self.branch7x7dbl_2 = Conv2d(c7, c7, (7, 1), padding=(3, 0))
self.branch7x7dbl_3 = Conv2d(c7, c7, (1, 7), padding=(0, 3))
self.branch7x7dbl_4 = Conv2d(c7, c7, (7, 1), padding=(3, 0))
self.branch7x7dbl_5 = Conv2d(c7, 192, (1, 7), padding=(0, 3))
self.branch_pool = Conv2d(in_channels, 192, 1)
def forward(self, input_0):
primals_1 = self.branch1x1.conv.weight
primals_2 = self.branch1x1.conv.bias
primals_4 = self.branch7x7_1.conv.weight
primals_5 = self.branch7x7_1.conv.bias
primals_6 = self.branch7x7_2.conv.weight
primals_7 = self.branch7x7_2.conv.bias
primals_8 = self.branch7x7_3.conv.weight
primals_9 = self.branch7x7_3.conv.bias
primals_10 = self.branch7x7dbl_1.conv.weight
primals_11 = self.branch7x7dbl_1.conv.bias
primals_12 = self.branch7x7dbl_2.conv.weight
primals_13 = self.branch7x7dbl_2.conv.bias
primals_14 = self.branch7x7dbl_3.conv.weight
primals_15 = self.branch7x7dbl_3.conv.bias
primals_16 = self.branch7x7dbl_4.conv.weight
primals_17 = self.branch7x7dbl_4.conv.bias
primals_18 = self.branch7x7dbl_5.conv.weight
primals_19 = self.branch7x7dbl_5.conv.bias
primals_20 = self.branch_pool.conv.weight
primals_21 = self.branch_pool.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
| Hiroaki-Ozaki/modelib-classification | InceptionB | false | 17,396 | ["WTFPL"] | 10 | 11077704cc0bc9a42fc4b94da60b57d31ff0f65c | https://github.com/Hiroaki-Ozaki/modelib-classification/tree/11077704cc0bc9a42fc4b94da60b57d31ff0f65c |
GroupGRUCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/uo/cuo7v3tmqap62gqxtenascsyx4gzrkrrz34zfainxgps6cvix2rl.py
# Topologically Sorted Source Nodes: [add, resetgate, add_1, inputgate, mul, add_2, newgate, sub, mul_1, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.sub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# hy => add_3
# inputgate => sigmoid_1
# mul => mul
# mul_1 => mul_1
# newgate => tanh
# resetgate => sigmoid
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_3), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_1, %getitem_4), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul), kwargs = {})
# %tanh : [num_users=3] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %tanh), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, %mul_1), kwargs = {})
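# In plain terms (a reading of the graph fragment above, matching the eager
# forward() in the source module), the fused kernel computes the standard GRU
# cell update:
#     resetgate = sigmoid(i_r + h_r)
#     inputgate = sigmoid(i_i + h_i)
#     newgate   = tanh(i_n + resetgate * h_n)
#     hy        = newgate + inputgate * (hidden - newgate)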
triton_poi_fused_add_mul_sigmoid_sub_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sub_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sub_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x4 = xindex
x2 = (xindex // 4) % 4
x3 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0 + (12*x1)), xmask)
tmp4 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (x0 + (12*x1)), xmask)
tmp8 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (8 + x0 + (12*x1)), xmask)
tmp13 = tl.load(in_ptr2 + (x0 + (4*x3) + (16*x2)), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp7 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.tanh(tmp11)
tmp14 = tmp13 - tmp12
tmp15 = tmp3 * tmp14
tmp16 = tmp12 + tmp15
tl.store(out_ptr0 + (x4), tmp3, xmask)
tl.store(out_ptr1 + (x4), tmp7, xmask)
tl.store(out_ptr2 + (x4), tmp12, xmask)
tl.store(out_ptr3 + (x4), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 12), (48, 12, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 12), (48, 12, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 4), (4, 16, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_3, (4, 4, 4), (4, 16, 1), 0), primals_4, out=buf1)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, resetgate, add_1, inputgate, mul, add_2, newgate, sub, mul_1, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_tanh_0.run(buf0, buf1, primals_3, buf3, buf2, buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf0
return (buf5, primals_3, reinterpret_tensor(buf1, (4, 4, 4), (12, 48, 1), 8), buf2, buf3, buf4, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 12), (48, 12, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 12), (48, 12, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class GroupLinearLayer(nn.Module):
def __init__(self, din, dout, num_blocks):
super(GroupLinearLayer, self).__init__()
self.w = nn.Parameter(0.01 * torch.randn(num_blocks, din, dout))
def forward(self, x):
x = x.permute(1, 0, 2)
x = torch.bmm(x, self.w)
return x.permute(1, 0, 2)
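# GroupLinearLayer applies an independent (din -> dout) linear map per block:
# permuting x from (batch, num_blocks, din) to (num_blocks, batch, din) lets a
# single torch.bmm against w of shape (num_blocks, din, dout) perform all the
# block-wise projections at once.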
class GroupGRUCell(nn.Module):
"""
GroupGRUCell can compute the operation of N GRU Cells at once.
"""
def __init__(self, input_size, hidden_size, num_grus):
super(GroupGRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.x2h = GroupLinearLayer(input_size, 3 * hidden_size, num_grus)
self.h2h = GroupLinearLayer(hidden_size, 3 * hidden_size, num_grus)
self.reset_parameters()
def reset_parameters(self):
        1.0 / math.sqrt(self.hidden_size)  # note: computed but discarded; weights are reset to ones below
for w in self.parameters():
w.data = torch.ones(w.data.size())
def forward(self, x, hidden):
"""
input: x (batch_size, num_grus, input_size)
hidden (batch_size, num_grus, hidden_size)
output: hidden (batch_size, num_grus, hidden_size)
"""
gate_x = self.x2h(x)
gate_h = self.h2h(hidden)
i_r, i_i, i_n = gate_x.chunk(3, 2)
h_r, h_i, h_n = gate_h.chunk(3, 2)
resetgate = torch.sigmoid(i_r + h_r)
inputgate = torch.sigmoid(i_i + h_i)
newgate = torch.tanh(i_n + resetgate * h_n)
hy = newgate + inputgate * (hidden - newgate)
return hy
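# A minimal usage sketch (added for illustration, not part of the original
# repo): exercises the cell with the shapes documented in forward().
def example_gru_cell_usage():
    cell = GroupGRUCell(input_size=4, hidden_size=4, num_grus=4)
    x = torch.rand(4, 4, 4)  # (batch_size, num_grus, input_size)
    hidden = torch.rand(4, 4, 4)  # (batch_size, num_grus, hidden_size)
    return cell(x, hidden)  # -> (batch_size, num_grus, hidden_size)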
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_grus': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x4 = xindex
x2 = xindex // 4 % 4
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0 + 12 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (x0 + 12 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (8 + x0 + 12 * x1), xmask)
tmp13 = tl.load(in_ptr2 + (x0 + 4 * x3 + 16 * x2), xmask)
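    # Gate mapping (chunk layout along the 12-wide dim is [r | i | n]):
    # tmp3 = inputgate = sigmoid(i_i + h_i), tmp7 = resetgate = sigmoid(i_r + h_r),
    # tmp12 = newgate = tanh(i_n + resetgate * h_n),
    # tmp16 = hy = newgate + inputgate * (hidden - newgate).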
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp7 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.tanh(tmp11)
tmp14 = tmp13 - tmp12
tmp15 = tmp3 * tmp14
tmp16 = tmp12 + tmp15
tl.store(out_ptr0 + x4, tmp3, xmask)
tl.store(out_ptr1 + x4, tmp7, xmask)
tl.store(out_ptr2 + x4, tmp12, xmask)
tl.store(out_ptr3 + x4, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 12), (48, 12, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 12), (48, 12, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 4), (4, 16,
1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_3, (4, 4, 4), (4, 16,
1), 0), primals_4, out=buf1)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_tanh_0[grid(64)](buf0, buf1,
primals_3, buf3, buf2, buf4, buf5, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf0
return buf5, primals_3, reinterpret_tensor(buf1, (4, 4, 4), (12, 48, 1), 8
), buf2, buf3, buf4, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1,
16), 0)
class GroupLinearLayer(nn.Module):
def __init__(self, din, dout, num_blocks):
super(GroupLinearLayer, self).__init__()
self.w = nn.Parameter(0.01 * torch.randn(num_blocks, din, dout))
def forward(self, x):
x = x.permute(1, 0, 2)
x = torch.bmm(x, self.w)
return x.permute(1, 0, 2)
class GroupGRUCellNew(nn.Module):
"""
GroupGRUCell can compute the operation of N GRU Cells at once.
"""
def __init__(self, input_size, hidden_size, num_grus):
super(GroupGRUCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.x2h = GroupLinearLayer(input_size, 3 * hidden_size, num_grus)
self.h2h = GroupLinearLayer(hidden_size, 3 * hidden_size, num_grus)
self.reset_parameters()
def reset_parameters(self):
        1.0 / math.sqrt(self.hidden_size)  # note: computed but discarded; weights are reset to ones below
for w in self.parameters():
w.data = torch.ones(w.data.size())
def forward(self, input_0, input_1):
primals_2 = self.x2h.w
primals_4 = self.h2h.w
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Hritikbansal/RNNs_SVA_OOD | GroupGRUCell | false | 17,397 | ["MIT"] | 4 | a1c73955342d9d35c49da5fcb7b315e37b0f75d1 | https://github.com/Hritikbansal/RNNs_SVA_OOD/tree/a1c73955342d9d35c49da5fcb7b315e37b0f75d1 |
CNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cn/ccn3wvv7hvajmiytvd4cxr5ymsooreymqjbzit5wvlzzy3qc5dck.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
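    # NCHW -> channels-last copy of the (4, 3, 64, 64) input: yindex walks
    # batch*channel (ynumel = 12), xindex walks the 64*64 spatial positions,
    # and the output stride (y0 + 3*x2 + 12288*y1) interleaves channels last.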
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/jk/cjkdicw2e3tkv3khukujnpbucriifiixtykbm5ehkmwsjglj4nlc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 48
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (75*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/g6/cg6be7dunuqnqegni7bu3gst72ztqnw4kjfvoz77hlhvlq3tjpsm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 512
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = (yindex // 16)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (16*x2) + (400*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xe/cxezqc3od6fvix7jmd4sjricohvmoq732s4f6uqsthmkgdm6cmvy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/io/ciokq54ir4ozfawt7dspetxwbigmo7d77gcou52dgqncoo5uu3ve.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ll/cllywxdwrpkbgh3zeqx3edwjb7vu4waf3hcbxgd4nrh3lgpx47tm.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out => convolution
# out_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/jc/cjczopa6phcfbq4gf5u2myzcc4mk6uflsmdwfbqy7mlelfhi3l37.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16) % 32
x2 = (xindex // 512)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (32*x1) + (2048*x2)), None)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (32*x1) + (2048*x2)), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + (32*x1) + (2048*x2)), None)
tmp5 = tl.load(in_ptr0 + (1040 + x0 + (32*x1) + (2048*x2)), None)
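    # 2x2, stride-2 max pool over a channels-last tensor: tmp6 is the window
    # max; tmp7-tmp16 encode which of the four taps won as an int8 offset
    # (0..3), saved for max_pool2d's backward.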
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kx/ckx6beddqq333ye5pc4oshqp3zmb65ody6g2wp4d3bpke2674rr6.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_3 => convolution_1
# out_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6v/c6v6t27lljioios2y5xqeycgzbrnbk3rlpo4zio2yyxklgo2cnlk.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32) % 16
x2 = (xindex // 512)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (2048*x2)), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (2048*x2)), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + (64*x1) + (2048*x2)), None)
tmp5 = tl.load(in_ptr0 + (1056 + x0 + (64*x1) + (2048*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ce/ccesoeufupkjrtywborwkkkgn7tuz7khv5r7nda2gwzihxompoiq.py
# Topologically Sorted Source Nodes: [conv2d_2, out_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# out_6 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ld/cldrswfnua23pmo3y2o5nevobgvmowgswvpshc7iidcjbolw2s2t.py
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_7 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = (xindex // 64) % 8
x2 = (xindex // 512)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (2048*x2)), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (2048*x2)), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + (128*x1) + (2048*x2)), None)
tmp5 = tl.load(in_ptr0 + (1088 + x0 + (128*x1) + (2048*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
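# The kernel above implements a 2x2/stride-2 max pool over a channels-last
# activation: x0 indexes the 64 channels, and the +64 / +1024 / +1088 offsets
# reach the right, lower, and lower-right window entries. The int8 output
# records which of the four positions won (0..3, row-major in the window)
# for the pooling backward. A minimal eager-mode sketch of the same
# value/offset pair -- the helper name is ours, not part of the generated
# module:
def _reference_maxpool_2x2(x):
    # x: (N, C, H, W) float tensor with even H and W.
    vals, idx = torch.nn.functional.max_pool2d(
        x, kernel_size=2, stride=2, return_indices=True)
    w_in = x.shape[-1]
    # Flat per-plane input index -> window-local offset in {0, 1, 2, 3}.
    offs = 2 * ((idx // w_in) % 2) + (idx % w_in) % 2
    return vals, offs.to(torch.int8)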
# kernel path: runs/run_shard_2/inductor_cache/hv/chvrpuscht3ew5huphzpvx4k4mjjebddshmvrmrmsuwizuminzib.py
# Topologically Sorted Source Nodes: [conv2d_3, out_8], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# out_8 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ev/cevnpdburvakaq2z5ozhavtc7fgnjhfdpwe54d3mf3hinudsapvk.py
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_9 => _low_memory_max_pool2d_with_offsets_3, getitem_7
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 128], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y5 = yindex
y4 = (yindex // 16)
y6 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (2048*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (2048*y1)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1024 + x2 + (256*y0) + (2048*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1152 + x2 + (256*y0) + (2048*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + (16*x2) + (2048*y4)), tmp16, xmask & ymask)
''', device_str='cuda')
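# Unlike the earlier pooling kernels, this one also converts layouts: the
# input is read with channels-last strides, while out_ptr1 is written with
# the contiguous (2048, 16, 4, 1) strides of buf20, so the later
# reinterpret_tensor view to (4, 2048) for fc1 needs no extra copy. An
# eager-mode equivalent (helper name ours, a sketch only):
def _pool_and_repack(x):
    # Pool 8x8 -> 4x4, then materialize standard NCHW strides so that
    # a view(batch, -1) is valid for the following matmul.
    return torch.nn.functional.max_pool2d(x, 2, 2).contiguous()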
# kernel path: runs/run_shard_2/inductor_cache/oz/cozw75bq4npy3g5f7dksuozxlpzhbbcxowfdom3btrlpxun3dstz.py
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_12 => relu_4
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_13 = async_compile.triton('triton_poi_fused_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
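# triton_poi_fused_relu_13 is only the epilogue of fc1: the matmul itself
# runs through extern_kernels.mm below, after which this kernel adds the
# bias (broadcast over the batch via x0 = xindex % 256) and applies ReLU
# in place. The eager equivalent is simply relu(x @ w.t() + b); a sketch
# with a hypothetical helper name:
def _linear_relu(x, weight, bias):
    return torch.relu(torch.addmm(bias, x, weight.t()))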
# kernel path: runs/run_shard_2/inductor_cache/od/codkckglbsk37w4tdo65vytmlvjz3xcl66asugp4jc2jmrlempjj.py
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_14 => relu_5
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_14 = async_compile.triton('triton_poi_fused_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (16, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 2048), (2048, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (64, 256), (256, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (7, 64), (64, 1))
assert_size_stride(primals_15, (7, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 3, 5, 5), (75, 1, 15, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 48, 25, grid=grid(48, 25), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 512, 25, grid=grid(512, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 2048, 9, grid=grid(2048, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf6, primals_3, 262144, grid=grid(262144), stream=stream0)
del primals_3
buf7 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16), torch.float32)
buf8 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16), torch.int8)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_6.run(buf6, buf7, buf8, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf10, primals_5, 131072, grid=grid(131072), stream=stream0)
del primals_5
buf11 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32), torch.float32)
buf12 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32), torch.int8)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf10, buf11, buf12, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, out_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf14, primals_7, 65536, grid=grid(65536), stream=stream0)
del primals_7
buf15 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.float32)
buf16 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.int8)
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf14, buf15, buf16, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, out_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf18, primals_9, 32768, grid=grid(32768), stream=stream0)
del primals_9
buf19 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.int8)
buf20 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf18, buf19, buf20, 64, 128, grid=grid(64, 128), stream=stream0)
buf21 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf20, (4, 2048), (2048, 1), 0), reinterpret_tensor(primals_10, (2048, 256), (1, 2048), 0), out=buf21)
buf22 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.relu]
triton_poi_fused_relu_13.run(buf22, primals_11, 1024, grid=grid(1024), stream=stream0)
del primals_11
buf23 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf22, reinterpret_tensor(primals_12, (256, 64), (1, 256), 0), out=buf23)
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.relu]
triton_poi_fused_relu_14.run(buf24, primals_13, 256, grid=grid(256), stream=stream0)
del primals_13
buf25 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_15], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, buf24, reinterpret_tensor(primals_14, (64, 7), (1, 64), 0), alpha=1, beta=1, out=buf25)
del primals_15
return (buf25, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10, buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor(buf20, (4, 2048), (2048, 1), 0), buf22, buf24, primals_14, primals_12, primals_10, )
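# call() returns the logits (buf25) first; the remaining entries are saved
# activations, pool indices, and reinterpreted weights kept for the backward
# pass of this graph. Assuming a module whose parameters are registered in
# the same order as primals_2..primals_15 (as the CNN reference module this
# graph was traced from is), a forward-only wrapper is just -- sketch,
# helper name ours:
def _forward_only(model, x):
    # x: (4, 3, 64, 64) CUDA tensor; all parameters must live on CUDA.
    return call([x] + list(model.parameters()))[0]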
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((7, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((7, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, action_dim=7):
super(CNN, self).__init__()
self.action_dim = action_dim
self.conv1 = nn.Conv2d(3, 16, 5, padding=2)
self.conv2 = nn.Conv2d(16, 32, 5, padding=2)
self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
self.conv4 = nn.Conv2d(64, 128, 3, padding=1)
self.fc1 = nn.Linear(128 * 4 * 4, 256)
self.fc2 = nn.Linear(256, 64)
self.fc3 = nn.Linear(64, self.action_dim)
def forward(self, x):
in_size = x.size(0)
out = self.conv1(x)
out = F.relu(out)
out = F.max_pool2d(out, 2, 2)
out = self.conv2(out)
out = F.relu(out)
out = F.max_pool2d(out, 2, 2)
out = F.relu(self.conv3(out))
out = F.max_pool2d(out, 2, 2)
out = F.relu(self.conv4(out))
out = F.max_pool2d(out, 2, 2)
out = out.view(in_size, -1)
out = self.fc1(out)
out = F.relu(out)
out = self.fc2(out)
out = F.relu(out)
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
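# Shape walk-through for the forward pass above with the 64x64 inputs from
# get_inputs(): every conv preserves the spatial size (padding matches its
# kernel) and every 2x2 pool halves it, 64 -> 32 -> 16 -> 8 -> 4, which is
# where fc1's 128 * 4 * 4 = 2048 input features come from. A quick smoke
# test (sketch, helper name ours):
def _smoke_test_cnn():
    model = CNN()
    out = model(torch.rand(4, 3, 64, 64))
    assert out.shape == (4, 7)  # batch of 4, action_dim = 7 logits
    return out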
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 48
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 400 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16 % 32
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 32 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1040 + x0 + 32 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32 % 16
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 64 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1056 + x0 + 64 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 8
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 128 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1088 + x0 + 128 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y5 = yindex
y4 = yindex // 16
y6 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 2048 * y1), xmask & ymask,
eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 2048 * y1),
        xmask & ymask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1024 + x2 + 256 * y0 + 2048 * y1),
        xmask & ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (1152 + x2 + 256 * y0 + 2048 * y1),
        xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + 16 * x2 + 2048 * y4), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_14(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (16, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 2048), (2048, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (64, 256), (256, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (7, 64), (64, 1))
assert_size_stride(primals_15, (7,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 3, 5, 5), (75, 1, 15, 3), torch.float32)
triton_poi_fused_1[grid(48, 25)](primals_2, buf1, 48, 25, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
        buf2 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16),
            torch.float32)
        triton_poi_fused_2[grid(512, 25)](primals_4, buf2, 512, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32),
            torch.float32)
        triton_poi_fused_3[grid(2048, 9)](primals_6, buf3, 2048, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64),
            torch.float32)
        triton_poi_fused_4[grid(8192, 9)](primals_8, buf4, 8192, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
buf5 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(262144)](buf6, primals_3,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf7 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16),
torch.float32)
buf8 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_6[grid(65536)](buf6, buf7,
buf8, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(131072)](buf10, primals_5,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf11 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
torch.float32)
buf12 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(32768)](buf10,
buf11, buf12, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_9[grid(65536)](buf14, primals_7,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
        buf15 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64),
            torch.float32)
        buf16 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64),
            torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(16384)](buf14,
buf15, buf16, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_11[grid(32768)](buf18, primals_9,
32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.int8)
        buf20 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1),
            torch.float32)
triton_poi_fused_max_pool2d_with_indices_12[grid(64, 128)](buf18,
buf19, buf20, 64, 128, XBLOCK=128, YBLOCK=2, num_warps=4,
num_stages=1)
buf21 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf20, (4, 2048), (2048, 1), 0),
            reinterpret_tensor(primals_10, (2048, 256), (1, 2048), 0),
            out=buf21)
buf22 = buf21
del buf21
triton_poi_fused_relu_13[grid(1024)](buf22, primals_11, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf23 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf22, reinterpret_tensor(primals_12, (256, 64),
(1, 256), 0), out=buf23)
buf24 = buf23
del buf23
        triton_poi_fused_relu_14[grid(256)](buf24, primals_13, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf25 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
extern_kernels.addmm(primals_15, buf24, reinterpret_tensor(
primals_14, (64, 7), (1, 64), 0), alpha=1, beta=1, out=buf25)
del primals_15
    return (buf25, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10,
        buf11, buf12, buf14, buf15, buf16, buf18, buf19,
        reinterpret_tensor(buf20, (4, 2048), (2048, 1), 0), buf22, buf24,
        primals_14, primals_12, primals_10)
class CNNNew(nn.Module):
def __init__(self, action_dim=7):
super(CNNNew, self).__init__()
self.action_dim = action_dim
self.conv1 = nn.Conv2d(3, 16, 5, padding=2)
self.conv2 = nn.Conv2d(16, 32, 5, padding=2)
self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
self.conv4 = nn.Conv2d(64, 128, 3, padding=1)
self.fc1 = nn.Linear(128 * 4 * 4, 256)
self.fc2 = nn.Linear(256, 64)
self.fc3 = nn.Linear(64, self.action_dim)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc1.weight
primals_11 = self.fc1.bias
primals_12 = self.fc2.weight
primals_13 = self.fc2.bias
primals_14 = self.fc3.weight
primals_15 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
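# CNNNew routes forward() through the Inductor-generated call(), so it
# should match the eager CNN reference up to small numeric differences
# from kernel fusion. A sanity check -- sketch only, requires CUDA and
# assumes the eager CNN class from the reference implementation above is
# importable and passed in:
def _compare_eager_vs_compiled(CNN):
    eager, compiled = CNN().cuda(), CNNNew().cuda()
    compiled.load_state_dict(eager.state_dict())  # same parameter names
    x = torch.rand(4, 3, 64, 64, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)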
| HeegerGao/CRIL | CNN | false | 17,398 | ["MIT"] | 9 | c4095bca7cf5c8e376b0014447b1422c1b5b6cec | https://github.com/HeegerGao/CRIL/tree/c4095bca7cf5c8e376b0014447b1422c1b5b6cec |
DecayModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/e2/ce2jt6xlphadls2ya3qtkdqfji5jgeyiafb6s3scf5xfh7br3ztq.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_2,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp4, xmask)
''', device_str='cuda')
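# This kernel fuses the forward ReLU on weight_hh with precomputing the
# mask that aten.threshold_backward will consume: out_ptr0 receives relu(w)
# and out_ptr1 the boolean (relu(w) <= 0) used to zero incoming gradients.
# Eager-mode equivalent (sketch, helper name ours):
def _relu_with_backward_mask(w):
    r = torch.relu(w)
    return r, r <= 0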
# kernel path: runs/run_shard_2/inductor_cache/zk/czkyidvy5lqk5ou4dutswvq3khdq27ufdhx62du4vhfbnebib743.py
# Topologically Sorted Source Nodes: [hx, matmul_1, w_h], Original ATen: [aten.new_zeros, aten.mv, aten.add]
# Source node to ATen node mapping:
# hx => full_default
# matmul_1 => mul, sum_1
# w_h => add_1
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, %full_default), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_6, %sum_1), kwargs = {})
triton_poi_fused_add_mv_new_zeros_1 = async_compile.triton('triton_poi_fused_add_mv_new_zeros_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mv_new_zeros_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mv_new_zeros_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = 0.0
tmp3 = tmp1 * tmp2
tmp5 = tmp4 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp2
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp2
tmp12 = tmp9 + tmp11
tmp13 = tmp0 + tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/2p/c2p5uhunqvopd6fepwr2gap572uizhm2m44dwqbpprtoet3xq5fb.py
# Topologically Sorted Source Nodes: [hx, w_x, matmul_1, w_h, sigmoid, mul, sub, add_2, mul_1, w_w, h], Original ATen: [aten.new_zeros, aten.add, aten.mv, aten.sigmoid, aten.mul, aten.rsub, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add_2 => add_2
# h => relu_1
# hx => full_default
# matmul_1 => mul, sum_1
# mul => mul_1
# mul_1 => mul_2
# sigmoid => sigmoid
# sub => sub
# w_h => add_1
# w_w => add_3
# w_x => add
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %permute), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, %full_default), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_6, %sum_1), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_7,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %full_default), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %add_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %add_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_mul_mv_new_zeros_relu_rsub_sigmoid_threshold_backward_2 = async_compile.triton('triton_poi_fused_add_mul_mv_new_zeros_relu_rsub_sigmoid_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_mv_new_zeros_relu_rsub_sigmoid_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_mv_new_zeros_relu_rsub_sigmoid_threshold_backward_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tl.sigmoid(tmp6)
tmp8 = 0.0
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp7
tmp12 = tmp11 * tmp4
tmp13 = tmp9 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tmp15 <= tmp8
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
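# Because forward() was traced with hx=None, Inductor constant-folds hx to
# a zero vector: the mv against hx shows up as the "* 0.0" partial sums in
# the previous kernel, and sigmoid(rgate) * hx collapses to tmp9 = tmp7 * 0.0
# here. The gated leaky-integrator update the fused kernel implements is,
# in eager form (sketch, helper name ours):
def _decay_update(rgate, hx, w_x, w_h):
    g = torch.sigmoid(rgate)
    return torch.relu(g * hx + (1 - g) * (w_x + w_h))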
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(primals_2, buf0, buf7, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dale_hh], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_3, out=buf1)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(primals_5, primals_1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [hx, matmul_1, w_h], Original ATen: [aten.new_zeros, aten.mv, aten.add]
triton_poi_fused_add_mv_new_zeros_1.run(primals_6, buf1, buf3, 4, grid=grid(4), stream=stream0)
del primals_6
buf4 = reinterpret_tensor(buf2, (4, 4), (1, 4), 0); del buf2 # reuse
buf5 = reinterpret_tensor(buf1, (4, 4), (1, 4), 0); del buf1 # reuse
buf6 = empty_strided_cuda((4, 4), (1, 4), torch.bool)
# Topologically Sorted Source Nodes: [hx, w_x, matmul_1, w_h, sigmoid, mul, sub, add_2, mul_1, w_w, h], Original ATen: [aten.new_zeros, aten.add, aten.mv, aten.sigmoid, aten.mul, aten.rsub, aten.relu, aten.threshold_backward]
triton_poi_fused_add_mul_mv_new_zeros_relu_rsub_sigmoid_threshold_backward_2.run(buf4, primals_4, buf3, primals_7, buf5, buf6, 16, grid=grid(16), stream=stream0)
del buf3
del primals_4
return (buf5, primals_7, buf4, buf6, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class DecayModule(nn.Module):
def __init__(self, input_size, hidden_size, bias=True, num_chunks=1,
activation='relu', nodiag=False):
super(DecayModule, self).__init__()
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
self.relu = nn.ReLU()
self.input_size = input_size
self.hidden_size = hidden_size
self.nodiag = nodiag
self.bias = bias
self.num_chunks = num_chunks
self.rgate = nn.Parameter(torch.tensor(0.8), requires_grad=True)
self.weight_ih = nn.Parameter(torch.Tensor(num_chunks * hidden_size,
input_size))
self.weight_hh = nn.Parameter(torch.Tensor(num_chunks * hidden_size,
hidden_size))
self.d_rec = nn.Parameter(torch.zeros(num_chunks * hidden_size,
hidden_size), requires_grad=False)
self.activation = activation
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(num_chunks * hidden_size))
self.bias_hh = nn.Parameter(torch.Tensor(num_chunks * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
self.reset_parameters()
if self.nodiag:
for i in range(hidden_size):
self.weight_hh.data[i, i] = 0
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
for name, param in self.named_parameters():
if name == 'rgate':
param.data = torch.tensor(1.4)
for i in range(self.num_chunks):
x = i * self.hidden_size
for j in range(self.hidden_size):
if j < 0.8 * self.hidden_size:
self.d_rec[x + j][j] = 1.0
else:
self.d_rec[x + j][j] = -1.0
def forward(self, input_, hx=None):
if hx is None:
hx = input_.new_zeros(self.num_chunks * self.hidden_size,
requires_grad=False)
dale_hh = torch.mm(self.relu(self.weight_hh), self.d_rec)
if self.bias:
w_x = self.bias_ih + torch.matmul(self.weight_ih, input_).t()
w_h = self.bias_hh + torch.matmul(dale_hh, hx.t()).t()
else:
w_x = torch.matmul(self.weight_ih, input_).t()
w_h = torch.matmul(dale_hh, hx.t()).t()
        w_w = (self.sigmoid(self.rgate) * hx +
            (1 - self.sigmoid(self.rgate)) * (w_x + w_h))
if self.activation == 'tanh':
h = self.tanh(w_w)
else:
h = self.relu(w_w)
return h
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
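# Illustrative usage (added sketch, not part of the original source): one
# forward step of DecayModule with the shapes from get_inputs() and
# get_init_inputs(). With the default hx of zeros, the sigmoid(rgate) * hx
# term vanishes, so h = relu((1 - sigmoid(rgate)) * (w_x + w_h)).
def _example_decay_step():
    cell = DecayModule(input_size=4, hidden_size=4)
    x = torch.rand(4, 4)
    h = cell(x)
    assert h.shape == (4, 4)
    return h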
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_mv_new_zeros_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = 0.0
tmp3 = tmp1 * tmp2
tmp5 = tmp4 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp2
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp2
tmp12 = tmp9 + tmp11
tmp13 = tmp0 + tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
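# Note (added): the zero constants in the kernel above come from
# hx = input_.new_zeros(...) in the eager model. Every product against the
# zero hidden state vanishes, so out_ptr0 ends up holding a plain copy of
# in_ptr0 (bias_hh); the multiplies survive because the graph was traced
# with hx materialized as a concrete zero tensor rather than folded away.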
@triton.jit
def triton_poi_fused_add_mul_mv_new_zeros_relu_rsub_sigmoid_threshold_backward_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tl.sigmoid(tmp6)
tmp8 = 0.0
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp7
tmp12 = tmp11 * tmp4
tmp13 = tmp9 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tmp15 <= tmp8
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
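# Note (added): this kernel fuses the gating update. tmp4 is w_x + w_h,
# tmp9 corresponds to sigmoid(rgate) * hx with hx == 0, tmp12 to
# (1 - sigmoid(rgate)) * (w_x + w_h), and out_ptr0 / out_ptr1 hold
# relu(w_w) and the <= 0 mask that threshold_backward reuses.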
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](primals_2,
buf0, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, primals_3, out=buf1)
buf2 = buf0
del buf0
extern_kernels.mm(primals_5, primals_1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_add_mv_new_zeros_1[grid(4)](primals_6, buf1, buf3,
4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_6
buf4 = reinterpret_tensor(buf2, (4, 4), (1, 4), 0)
del buf2
buf5 = reinterpret_tensor(buf1, (4, 4), (1, 4), 0)
del buf1
buf6 = empty_strided_cuda((4, 4), (1, 4), torch.bool)
triton_poi_fused_add_mul_mv_new_zeros_relu_rsub_sigmoid_threshold_backward_2[
grid(16)](buf4, primals_4, buf3, primals_7, buf5, buf6, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf3
del primals_4
return buf5, primals_7, buf4, buf6, reinterpret_tensor(primals_1, (4, 4
), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), buf7
class DecayModuleNew(nn.Module):
def __init__(self, input_size, hidden_size, bias=True, num_chunks=1,
activation='relu', nodiag=False):
super(DecayModuleNew, self).__init__()
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
self.relu = nn.ReLU()
self.input_size = input_size
self.hidden_size = hidden_size
self.nodiag = nodiag
self.bias = bias
self.num_chunks = num_chunks
self.rgate = nn.Parameter(torch.tensor(0.8), requires_grad=True)
self.weight_ih = nn.Parameter(torch.Tensor(num_chunks * hidden_size,
input_size))
self.weight_hh = nn.Parameter(torch.Tensor(num_chunks * hidden_size,
hidden_size))
self.d_rec = nn.Parameter(torch.zeros(num_chunks * hidden_size,
hidden_size), requires_grad=False)
self.activation = activation
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(num_chunks * hidden_size))
self.bias_hh = nn.Parameter(torch.Tensor(num_chunks * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
self.reset_parameters()
if self.nodiag:
for i in range(hidden_size):
self.weight_hh.data[i, i] = 0
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
for name, param in self.named_parameters():
if name == 'rgate':
param.data = torch.tensor(1.4)
for i in range(self.num_chunks):
x = i * self.hidden_size
for j in range(self.hidden_size):
if j < 0.8 * self.hidden_size:
self.d_rec[x + j][j] = 1.0
else:
self.d_rec[x + j][j] = -1.0
def forward(self, input_0):
primals_7 = self.rgate
primals_1 = self.weight_ih
primals_2 = self.weight_hh
primals_3 = self.d_rec
primals_4 = self.bias_ih
primals_6 = self.bias_hh
primals_5 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
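# Sanity-check sketch (added; assumes a CUDA device and that both
# DecayModule and DecayModuleNew above are in scope). The compiled wrapper
# should reproduce the eager module once the two share the same parameters.
def _check_decay_equivalence():
    ref = DecayModule(4, 4).cuda()
    new = DecayModuleNew(4, 4).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, device='cuda')
    return torch.allclose(ref(x), new(x), atol=1e-06)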
| Hritikbansal/RNNs_SVA_OOD | DecayModule | false | 17,399 | ["MIT"] | 4 | a1c73955342d9d35c49da5fcb7b315e37b0f75d1 | https://github.com/Hritikbansal/RNNs_SVA_OOD/tree/a1c73955342d9d35c49da5fcb7b315e37b0f75d1 |
GridReduction2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/fb/cfbyh7temr2u5cam5txbekh4n77qsqs7bwxugnr6jx7oljt7yy43.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6m/c6mkfezu4xzld25fhnabqh7yedvr4zustapn7rkogaxagjozifrg.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 61440
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (1728*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/lf/clfhley5mfemubluivk2iadf3xcd36zzbz6j37jt6gmlo72j46ob.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 8], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 36864
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (7*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (1344*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/35/c35ty2bo6dzl3tqof6jfyfvrncssci7msndoq2u6dqlvbk7yabji.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 36864
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (1728*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/eb/ceb667bvks5rzvs6us5reyp3pftcli63lxv3tjsaa5m7xvzrvybr.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rq/crqlwkvvdr5nunvmsv7p3crs4s3clbcthf6zvu3k2j7kg3zhtbvc.py
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# branch_pool => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%primals_3, [3, 3], [2, 2], [0, 0], [1, 1], False), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp13 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + (x0 + (516*x1)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/tm/ctmocgicbjehiwbeant5i2efv7b3nf6t37akqtgwyrp75xfdaocj.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 320
x1 = (xindex // 320)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + (516*x1)), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/if/cifuc7wppkhvhmcxnjf32qf5gyexyix6lqc6l2ce3prbv6xkrqu2.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_10 => convolution_5
# x_11 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
x1 = (xindex // 192)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + (516*x1)), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (192, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (320, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_5, (320, ), (1, ))
assert_size_stride(primals_6, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (192, ), (1, ))
assert_size_stride(primals_8, (192, 192, 1, 7), (1344, 7, 7, 1))
assert_size_stride(primals_9, (192, ), (1, ))
assert_size_stride(primals_10, (192, 192, 7, 1), (1344, 7, 1, 1))
assert_size_stride(primals_11, (192, ), (1, ))
assert_size_stride(primals_12, (192, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_13, (192, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((320, 192, 3, 3), (1728, 1, 576, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf1, 61440, 9, grid=grid(61440, 9), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((192, 192, 1, 7), (1344, 1, 1344, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_8, buf2, 36864, 7, grid=grid(36864, 7), stream=stream0)
del primals_8
buf3 = empty_strided_cuda((192, 192, 7, 1), (1344, 1, 192, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_10, buf3, 36864, 7, grid=grid(36864, 7), stream=stream0)
del primals_10
buf4 = empty_strided_cuda((192, 192, 3, 3), (1728, 1, 576, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_12, buf4, 36864, 9, grid=grid(36864, 9), stream=stream0)
del primals_12
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 192, 4, 4), (3072, 1, 768, 192))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf6, primals_2, 12288, grid=grid(12288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 320, 1, 1), (320, 1, 320, 320))
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf0, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 192, 4, 4), (3072, 1, 768, 192))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 12288, grid=grid(12288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, buf2, stride=(1, 1), padding=(0, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 192, 4, 4), (3072, 1, 768, 192))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf11, primals_9, 12288, grid=grid(12288), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(3, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 192, 4, 4), (3072, 1, 768, 192))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 12288, grid=grid(12288), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 192, 1, 1), (192, 1, 192, 192))
buf18 = empty_strided_cuda((4, 516, 1, 1), (516, 1, 1, 1), torch.float32)
buf15 = reinterpret_tensor(buf18, (4, 4, 1, 1), (516, 1, 1, 1), 512) # alias
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf0, buf15, 16, grid=grid(16), stream=stream0)
buf16 = reinterpret_tensor(buf18, (4, 320, 1, 1), (516, 1, 1, 1), 0) # alias
buf20 = empty_strided_cuda((4, 320, 1, 1), (320, 1, 320, 320), torch.bool)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_6.run(buf7, primals_5, buf16, buf20, 1280, grid=grid(1280), stream=stream0)
del buf7
del primals_5
buf17 = reinterpret_tensor(buf18, (4, 192, 1, 1), (516, 1, 1, 1), 320) # alias
buf19 = empty_strided_cuda((4, 192, 1, 1), (192, 1, 192, 192), torch.bool)
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf14, primals_13, buf17, buf19, 768, grid=grid(768), stream=stream0)
del buf14
del primals_13
return (buf18, primals_1, buf0, buf1, primals_6, buf2, buf3, buf4, buf6, buf9, buf11, buf13, buf19, buf20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((192, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((320, 192, 3, 3), (1728, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((192, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((192, 192, 1, 7), (1344, 7, 7, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((192, 192, 7, 1), (1344, 7, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((192, 192, 3, 3), (1728, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
import torch.nn as nn
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class GridReduction2(nn.Module):
def __init__(self, in_channels):
super(GridReduction2, self).__init__()
self.branch3x3_1 = Conv2d(in_channels, 192, 1)
self.branch3x3_2 = Conv2d(192, 320, 3, stride=2)
self.branch7x7x3_1 = Conv2d(in_channels, 192, 1)
self.branch7x7x3_2 = Conv2d(192, 192, (1, 7), padding=(0, 3))
self.branch7x7x3_3 = Conv2d(192, 192, (7, 1), padding=(3, 0))
self.branch7x7x3_4 = Conv2d(192, 192, 3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
branches = [branch3x3, branch7x7x3, branch_pool]
outputs = torch.cat(branches, 1)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
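# Channel bookkeeping for the concat in forward() above (added worked
# check, assuming in_channels=4 as in get_init_inputs()):
#   branch3x3   -> 320 channels (Conv2d(192, 320, 3, stride=2))
#   branch7x7x3 -> 192 channels (Conv2d(192, 192, 3, stride=2))
#   branch_pool -> in_channels = 4 channels (max_pool2d keeps channels)
# Total: 320 + 192 + 4 = 516, the stride/offset constant that shows up
# throughout the fused Triton kernels in this record.
def _example_grid_reduction_shapes():
    m = GridReduction2(in_channels=4)
    out = m(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 516, 1, 1)
    return out.shape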
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
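# Note (added): triton_poi_fused_0 above is a pure layout conversion. It
# reads the (4, 4, 4, 4) input with contiguous NCHW strides (64, 16, 4, 1)
# and writes it with channels-last-style strides (64, 1, 16, 4), so the
# convolutions below consume a channel-minor layout. Roughly equivalent to
# primals_3.to(memory_format=torch.channels_last) (an illustration, not
# what the generated wrapper literally calls).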
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 192 * x2 + 1728 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 7 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 192 * x2 + 1344 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 192 * x2 + 1728 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + (x0 + 516 * x1), tmp16, xmask)
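# Note (added): the store stride of 516 above writes the pooled branch
# straight into its slice of the concatenated (4, 516, 1, 1) output buffer
# (buf18 in call() below, at channel offset 512 = 320 + 192), so the
# torch.cat in the eager model never runs as a separate kernel.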
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 320
x1 = xindex // 320
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + 516 * x1), tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
x1 = xindex // 192
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + 516 * x1), tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (192,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (320, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_5, (320,), (1,))
assert_size_stride(primals_6, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (192,), (1,))
assert_size_stride(primals_8, (192, 192, 1, 7), (1344, 7, 7, 1))
assert_size_stride(primals_9, (192,), (1,))
assert_size_stride(primals_10, (192, 192, 7, 1), (1344, 7, 1, 1))
assert_size_stride(primals_11, (192,), (1,))
assert_size_stride(primals_12, (192, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_13, (192,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((320, 192, 3, 3), (1728, 1, 576, 192),
torch.float32)
triton_poi_fused_1[grid(61440, 9)](primals_4, buf1, 61440, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((192, 192, 1, 7), (1344, 1, 1344, 192),
torch.float32)
triton_poi_fused_2[grid(36864, 7)](primals_8, buf2, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((192, 192, 7, 1), (1344, 1, 192, 192),
torch.float32)
triton_poi_fused_2[grid(36864, 7)](primals_10, buf3, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((192, 192, 3, 3), (1728, 1, 576, 192),
torch.float32)
triton_poi_fused_3[grid(36864, 9)](primals_12, buf4, 36864, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf5 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 192, 4, 4), (3072, 1, 768, 192))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_4[grid(12288)](buf6, primals_2,
12288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 320, 1, 1), (320, 1, 320, 320))
buf8 = extern_kernels.convolution(buf0, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 192, 4, 4), (3072, 1, 768, 192))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(12288)](buf9, primals_7,
12288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, buf2, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 192, 4, 4), (3072, 1, 768, 192))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_4[grid(12288)](buf11, primals_9,
12288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf12 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 192, 4, 4), (3072, 1, 768, 192))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(12288)](buf13, primals_11,
12288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, buf4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 192, 1, 1), (192, 1, 192, 192))
buf18 = empty_strided_cuda((4, 516, 1, 1), (516, 1, 1, 1), torch.
float32)
buf15 = reinterpret_tensor(buf18, (4, 4, 1, 1), (516, 1, 1, 1), 512)
triton_poi_fused_max_pool2d_with_indices_5[grid(16)](buf0, buf15,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = reinterpret_tensor(buf18, (4, 320, 1, 1), (516, 1, 1, 1), 0)
buf20 = empty_strided_cuda((4, 320, 1, 1), (320, 1, 320, 320),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(1280)](buf7
, primals_5, buf16, buf20, 1280, XBLOCK=128, num_warps=4,
num_stages=1)
del buf7
del primals_5
buf17 = reinterpret_tensor(buf18, (4, 192, 1, 1), (516, 1, 1, 1), 320)
buf19 = empty_strided_cuda((4, 192, 1, 1), (192, 1, 192, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(768)](buf14
, primals_13, buf17, buf19, 768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf14
del primals_13
return (buf18, primals_1, buf0, buf1, primals_6, buf2, buf3, buf4, buf6,
buf9, buf11, buf13, buf19, buf20)
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class GridReduction2New(nn.Module):
def __init__(self, in_channels):
super(GridReduction2New, self).__init__()
self.branch3x3_1 = Conv2d(in_channels, 192, 1)
self.branch3x3_2 = Conv2d(192, 320, 3, stride=2)
self.branch7x7x3_1 = Conv2d(in_channels, 192, 1)
self.branch7x7x3_2 = Conv2d(192, 192, (1, 7), padding=(0, 3))
self.branch7x7x3_3 = Conv2d(192, 192, (7, 1), padding=(3, 0))
self.branch7x7x3_4 = Conv2d(192, 192, 3, stride=2)
def forward(self, input_0):
primals_1 = self.branch3x3_1.conv.weight
primals_2 = self.branch3x3_1.conv.bias
primals_4 = self.branch3x3_2.conv.weight
primals_5 = self.branch3x3_2.conv.bias
primals_6 = self.branch7x7x3_1.conv.weight
primals_7 = self.branch7x7x3_1.conv.bias
primals_8 = self.branch7x7x3_2.conv.weight
primals_9 = self.branch7x7x3_2.conv.bias
primals_10 = self.branch7x7x3_3.conv.weight
primals_11 = self.branch7x7x3_3.conv.bias
primals_12 = self.branch7x7x3_4.conv.weight
primals_13 = self.branch7x7x3_4.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
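# Sanity-check sketch (added; assumes a CUDA device and that GridReduction2
# from the eager source above is in scope alongside GridReduction2New).
def _check_grid_reduction_equivalence():
    ref = GridReduction2(in_channels=4).cuda()
    new = GridReduction2New(in_channels=4).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return torch.allclose(ref(x), new(x), atol=1e-05)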
| Hiroaki-Ozaki/modelib-classification | GridReduction2 | false | 17,400 | ["WTFPL"] | 10 | 11077704cc0bc9a42fc4b94da60b57d31ff0f65c | https://github.com/Hiroaki-Ozaki/modelib-classification/tree/11077704cc0bc9a42fc4b94da60b57d31ff0f65c |
MetaPathAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/no/cnormoom4vemaczexrglp6hw3zouqcdtfv7vslevaddk7eofheq6.py
# Topologically Sorted Source Nodes: [inputs, inputs_1, inputs_2, inputs_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# inputs => cat
# inputs_1 => cat_1
# inputs_2 => cat_3
# inputs_3 => cat_5
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_3, %select], 1), kwargs = {})
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_3, %select_1], 1), kwargs = {})
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_3, %select_2], 1), kwargs = {})
# %cat_5 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_3, %select_3], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((16*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tmp17 = tl.load(in_ptr2 + (4 + (16*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp9, tmp10, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tmp20 = tl.load(in_ptr2 + (8 + (16*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = tl.load(in_ptr2 + (12 + (16*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tl.where(tmp9, tmp10, tmp23)
tmp25 = tl.where(tmp4, tmp5, tmp24)
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp19, xmask)
tl.store(out_ptr2 + (x2), tmp22, xmask)
tl.store(out_ptr3 + (x2), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xc/cxccpp4pfcj2tvplmtg246qxxhyacznqv7yzveooteee3mzaduqu.py
# Topologically Sorted Source Nodes: [output_1, tmp_output_1, tmp_output_5, tmp_output_9], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# output_1 => relu
# tmp_output_1 => relu_2
# tmp_output_5 => relu_4
# tmp_output_9 => relu_6
# Graph fragment:
# %add_tensor_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_7, %primals_5), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_7,), kwargs = {})
# %add_tensor_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_5, %primals_5), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_5,), kwargs = {})
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_5), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_5), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + (x2), xmask)
tmp8 = tl.load(in_out_ptr2 + (x2), xmask)
tmp11 = tl.load(in_out_ptr3 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp9 = tmp8 + tmp1
tmp10 = triton_helpers.maximum(tmp3, tmp9)
tmp12 = tmp11 + tmp1
tmp13 = triton_helpers.maximum(tmp3, tmp12)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(in_out_ptr1 + (x2), tmp7, xmask)
tl.store(in_out_ptr2 + (x2), tmp10, xmask)
tl.store(in_out_ptr3 + (x2), tmp13, xmask)
''', device_str='cuda')
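# Reference sketch (hypothetical helper): the kernel above performs the bias
# add and ReLU for all four dense_layer_1 matmul outputs in place, in a single
# launch, instead of four separate addmm+relu calls.
def _reference_bias_relu_1(mm_outputs, bias):
    # mm_outputs: four (4, 4) matmul results; bias: the shared (4,) dense_layer_1 bias.
    return [torch.relu(out + bias) for out in mm_outputs]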
# kernel path: runs/run_shard_2/inductor_cache/lr/clrlaqwj2l5gyb2xbq6i6jqggpc6ldbfvlroqkyf4abdr6vd2rsm.py
# Topologically Sorted Source Nodes: [output_6], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# output_6 => cat_6
# Graph fragment:
# %cat_6 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_4, %relu_7], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp12 = tl.load(in_ptr1 + (0))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tmp11 + tmp13
tmp15 = tl.full([1], 0, tl.int32)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp10, tmp16, tmp17)
tmp19 = tmp0 >= tmp8
tmp20 = tmp19 & tmp7
tmp21 = tl.load(in_ptr2 + (x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp21 + tmp13
tmp23 = triton_helpers.maximum(tmp15, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = tl.where(tmp9, tmp18, tmp25)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp7, tmp26, tmp27)
tmp29 = tmp0 >= tmp5
tmp30 = tmp29 & tmp4
tmp31 = tl.load(in_ptr3 + (x1), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp13
tmp33 = triton_helpers.maximum(tmp15, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.where(tmp6, tmp28, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp4, tmp36, tmp37)
tmp39 = tmp0 >= tmp3
tmp40 = tl.full([1], 4, tl.int64)
tmp41 = tmp0 < tmp40
tmp42 = tl.load(in_ptr4 + (x1), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 + tmp13
tmp44 = triton_helpers.maximum(tmp15, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp39, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp38, tmp46)
tl.store(out_ptr0 + (x2), tmp47, xmask)
''', device_str='cuda')
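# Reference sketch (hypothetical helper): the kernel above adds the scalar
# dense_layer_2 bias to each of the four (4, 1) scores, applies ReLU, and
# concatenates them along dim=1 into the (4, 4) attention-logit matrix.
def _reference_cat_scores_2(scores, bias):
    return torch.cat([torch.relu(s + bias) for s in scores], dim=1)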
# kernel path: runs/run_shard_2/inductor_cache/us/cusyqra2bg2vbuuxs26cz3sihdy7z4jtgux6v3orbezw7iu5eot5.py
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# atten => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%cat_6, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat_6, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
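# Reference sketch (hypothetical helper): kernel 3 is the first half of a
# numerically stable softmax over dim=1: subtract each row's max, then
# exponentiate. Kernel 4 below finishes the normalization.
def _reference_softmax_exp_3(x):
    return (x - x.max(dim=1, keepdim=True).values).exp()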
# kernel path: runs/run_shard_2/inductor_cache/wg/cwgoxrbhmwbtk44l4btvoekxvdbagjk6vkr5n42rshizrndijbnh.py
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# atten => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
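# Reference sketch (hypothetical helper): kernel 4 divides the exponentials
# from kernel 3 by their row sums; the two launches together compute
# F.softmax(x, dim=1).
def _reference_softmax_div_4(e):
    return e / e.sum(dim=1, keepdim=True)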
# kernel path: runs/run_shard_2/inductor_cache/lg/clgbr6qimnjuawbtwaxdsg75co76bfjvwzyhtmh2z7ldcz2dig2z.py
# Topologically Sorted Source Nodes: [mul, output_7], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# output_7 => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %unsqueeze), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_5 = async_compile.triton('triton_poi_fused_mul_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
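# Reference sketch (hypothetical helper): the kernel above fuses the
# broadcasted multiply with the sum over the metapath axis, so the full
# (4, 4, 4) product metapath_latent * atten.unsqueeze(-1) is never
# materialized in memory.
def _reference_attend_5(metapath_latent, atten):
    return (metapath_latent * atten.unsqueeze(-1)).sum(dim=1)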
# kernel path: runs/run_shard_2/inductor_cache/uu/cuuqqsdnnzfwctzifvpjb64xhwvvpnulkhx7vgma42256o4rbwt6.py
# Topologically Sorted Source Nodes: [output_3, tmp_output_3, tmp_output_7, tmp_output_11], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output_3 => relu_1
# tmp_output_11 => relu_7
# tmp_output_3 => relu_3
# tmp_output_7 => relu_5
# Graph fragment:
# %add_tensor_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_6, %primals_7), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_6,), kwargs = {})
# %add_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_4, %primals_7), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_4,), kwargs = {})
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_7), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
# %le_6 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*i1', 7: '*i1', 8: '*i1', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp8 = tl.load(in_ptr2 + (x0), xmask)
tmp12 = tl.load(in_ptr3 + (x0), xmask)
tmp16 = tl.load(in_ptr4 + (x0), xmask)
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tmp9 = tmp8 + tmp2
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp11 = tmp10 <= tmp6
tmp13 = tmp12 + tmp2
tmp14 = triton_helpers.maximum(tmp4, tmp13)
tmp15 = tmp14 <= tmp6
tmp17 = tmp16 + tmp2
tmp18 = triton_helpers.maximum(tmp4, tmp17)
tmp19 = tmp18 <= tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
tl.store(out_ptr2 + (x0), tmp15, xmask)
tl.store(out_ptr3 + (x0), tmp19, xmask)
''', device_str='cuda')
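# Reference sketch (hypothetical helper): kernel 6 recomputes relu(score + bias)
# for each of the four (4, 1) scores and stores the boolean mask (output <= 0)
# that ReLU's backward pass uses to zero incoming gradients.
def _reference_relu_mask_6(score, bias):
    return torch.relu(score + bias) <= 0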
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 12), (12, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf8 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [inputs, inputs_1, inputs_2, inputs_3], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_3, primals_1, buf0, buf4, buf8, buf12, 48, grid=grid(48), stream=stream0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf1)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf12, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf13)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf5)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf9)
del primals_4
buf2 = buf1; del buf1 # reuse
buf6 = buf5; del buf5 # reuse
buf10 = buf9; del buf9 # reuse
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [output_1, tmp_output_1, tmp_output_5, tmp_output_9], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, buf6, buf10, buf14, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf3)
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf6, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf7)
buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf11)
buf15 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf14, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_6], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf3, primals_7, buf7, buf11, buf15, buf16, 16, grid=grid(16), stream=stream0)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf16, buf17, 16, grid=grid(16), stream=stream0)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf17, buf18, 16, grid=grid(16), stream=stream0)
buf19 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [mul, output_7], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_5.run(primals_1, buf18, buf19, 16, grid=grid(16), stream=stream0)
del buf18
buf20 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
buf21 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
buf22 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
buf23 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_3, tmp_output_3, tmp_output_7, tmp_output_11], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_6.run(buf15, primals_7, buf11, buf7, buf3, buf20, buf21, buf22, buf23, 4, grid=grid(4), stream=stream0)
del buf11
del buf15
del buf3
del buf7
del primals_7
return (buf19, primals_1, buf0, buf2, buf4, buf6, buf8, buf10, buf12, buf14, buf16, buf20, primals_6, buf21, buf22, buf23, )
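# Note on call()'s return value: buf19 (the attention-pooled output) comes
# first; the remaining tensors are the activations plus the boolean ReLU masks
# buf20..buf23 that Inductor saves for the backward pass.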
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MetaPathAttention(nn.Module):
def __init__(self, att_size, latent_dim, metapath_type_num):
super(MetaPathAttention, self).__init__()
self.att_size = att_size
self.latent_dim = latent_dim
self.metapath_type_num = metapath_type_num
self.dense_layer_1 = nn.Linear(in_features=latent_dim * 3,
out_features=att_size)
self.dense_layer_2 = nn.Linear(in_features=att_size, out_features=1)
nn.init.xavier_normal_(self.dense_layer_1.weight.data)
nn.init.xavier_normal_(self.dense_layer_2.weight.data)
self.lam1 = lambda x, index: x[:, index, :]
self.lam2 = lambda x: F.softmax(x, dim=1)
self.lam3 = lambda metapath_latent, atten: torch.sum(
metapath_latent * torch.unsqueeze(atten, -1), 1)
def forward(self, user_latent, item_latent, metapath_latent):
metapath = self.lam1(metapath_latent, 0)
inputs = torch.cat((user_latent, item_latent, metapath), 1)
output = self.dense_layer_1(inputs)
output = F.relu(output)
output = self.dense_layer_2(output)
output = F.relu(output)
for i in range(1, self.metapath_type_num):
metapath = self.lam1(metapath_latent, i)
inputs = torch.cat((user_latent, item_latent, metapath), 1)
tmp_output = self.dense_layer_1(inputs)
tmp_output = F.relu(tmp_output)
tmp_output = self.dense_layer_2(tmp_output)
tmp_output = F.relu(tmp_output)
output = torch.cat((output, tmp_output), 1)
atten = self.lam2(output)
output = self.lam3(metapath_latent, atten)
return output
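# Minimal usage sketch (not part of the original module; shapes follow
# get_inputs/get_init_inputs below):
def _example_forward():
    attn = MetaPathAttention(att_size=4, latent_dim=4, metapath_type_num=4)
    user, item, paths = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4, 4)
    return attn(user, item, paths)  # (4, 4) attention-pooled metapath embedding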
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'att_size': 4, 'latent_dim': 4, 'metapath_type_num': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
    tl.full([1], 0, tl.int64)  # result unused: the always-true lower-bound check was pruned
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
    tl.full([1], 12, tl.int64)  # result unused: the final-branch upper-bound check was pruned
tmp14 = tl.load(in_ptr2 + (16 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tmp17 = tl.load(in_ptr2 + (4 + 16 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp9, tmp10, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tmp20 = tl.load(in_ptr2 + (8 + 16 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = tl.load(in_ptr2 + (12 + 16 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tl.where(tmp9, tmp10, tmp23)
tmp25 = tl.where(tmp4, tmp5, tmp24)
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp19, xmask)
tl.store(out_ptr2 + x2, tmp22, xmask)
tl.store(out_ptr3 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_out_ptr1, in_out_ptr2,
in_out_ptr3, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + x2, xmask)
tmp8 = tl.load(in_out_ptr2 + x2, xmask)
tmp11 = tl.load(in_out_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp9 = tmp8 + tmp1
tmp10 = triton_helpers.maximum(tmp3, tmp9)
tmp12 = tmp11 + tmp1
tmp13 = triton_helpers.maximum(tmp3, tmp12)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(in_out_ptr1 + x2, tmp7, xmask)
tl.store(in_out_ptr2 + x2, tmp10, xmask)
tl.store(in_out_ptr3 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp12 = tl.load(in_ptr1 + 0)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp0 = x0
    tl.full([1], 0, tl.int64)  # result unused: the always-true lower-bound check was pruned
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + x1, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp11 + tmp13
tmp15 = tl.full([1], 0, tl.int32)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp10, tmp16, tmp17)
tmp19 = tmp0 >= tmp8
tmp20 = tmp19 & tmp7
tmp21 = tl.load(in_ptr2 + x1, tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp21 + tmp13
tmp23 = triton_helpers.maximum(tmp15, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = tl.where(tmp9, tmp18, tmp25)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp7, tmp26, tmp27)
tmp29 = tmp0 >= tmp5
tmp30 = tmp29 & tmp4
tmp31 = tl.load(in_ptr3 + x1, tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp31 + tmp13
tmp33 = triton_helpers.maximum(tmp15, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.where(tmp6, tmp28, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp4, tmp36, tmp37)
tmp39 = tmp0 >= tmp3
    tl.full([1], 4, tl.int64)  # result unused: the final-branch upper-bound check was pruned
tmp42 = tl.load(in_ptr4 + x1, tmp39 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp43 = tmp42 + tmp13
tmp44 = triton_helpers.maximum(tmp15, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp39, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp38, tmp46)
tl.store(out_ptr0 + x2, tmp47, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp8 = tl.load(in_ptr2 + x0, xmask)
tmp12 = tl.load(in_ptr3 + x0, xmask)
tmp16 = tl.load(in_ptr4 + x0, xmask)
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tmp9 = tmp8 + tmp2
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp11 = tmp10 <= tmp6
tmp13 = tmp12 + tmp2
tmp14 = triton_helpers.maximum(tmp4, tmp13)
tmp15 = tmp14 <= tmp6
tmp17 = tmp16 + tmp2
tmp18 = triton_helpers.maximum(tmp4, tmp17)
tmp19 = tmp18 <= tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
tl.store(out_ptr3 + x0, tmp19, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 12), (12, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf8 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(48)](primals_2, primals_3, primals_1,
buf0, buf4, buf8, buf12, 48, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1,
12), 0), out=buf1)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf12, reinterpret_tensor(primals_4, (12, 4), (1,
12), 0), out=buf13)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (12, 4), (1,
12), 0), out=buf5)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_4, (12, 4), (1,
12), 0), out=buf9)
del primals_4
buf2 = buf1
del buf1
buf6 = buf5
del buf5
buf10 = buf9
del buf9
buf14 = buf13
del buf13
triton_poi_fused_relu_1[grid(16)](buf2, buf6, buf10, buf14,
primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_6, (4, 1), (1, 4
), 0), out=buf3)
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf6, reinterpret_tensor(primals_6, (4, 1), (1, 4
), 0), out=buf7)
buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (4, 1), (1,
4), 0), out=buf11)
buf15 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf14, reinterpret_tensor(primals_6, (4, 1), (1,
4), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_cat_2[grid(16)](buf3, primals_7, buf7, buf11,
buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(16)](buf16, buf17, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf17, buf18, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf19 = buf17
del buf17
triton_poi_fused_mul_sum_5[grid(16)](primals_1, buf18, buf19, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf18
buf20 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
buf21 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
buf22 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
buf23 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_6[grid(4)](buf15,
primals_7, buf11, buf7, buf3, buf20, buf21, buf22, buf23, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del buf11
del buf15
del buf3
del buf7
del primals_7
return (buf19, primals_1, buf0, buf2, buf4, buf6, buf8, buf10, buf12,
buf14, buf16, buf20, primals_6, buf21, buf22, buf23)
class MetaPathAttentionNew(nn.Module):
def __init__(self, att_size, latent_dim, metapath_type_num):
super(MetaPathAttentionNew, self).__init__()
self.att_size = att_size
self.latent_dim = latent_dim
self.metapath_type_num = metapath_type_num
self.dense_layer_1 = nn.Linear(in_features=latent_dim * 3,
out_features=att_size)
self.dense_layer_2 = nn.Linear(in_features=att_size, out_features=1)
nn.init.xavier_normal_(self.dense_layer_1.weight.data)
nn.init.xavier_normal_(self.dense_layer_2.weight.data)
self.lam1 = lambda x, index: x[:, index, :]
self.lam2 = lambda x: F.softmax(x, dim=1)
self.lam3 = lambda metapath_latent, atten: torch.sum(
metapath_latent * torch.unsqueeze(atten, -1), 1)
def forward(self, input_0, input_1, input_2):
primals_4 = self.dense_layer_1.weight
primals_5 = self.dense_layer_1.bias
primals_6 = self.dense_layer_2.weight
primals_7 = self.dense_layer_2.bias
primals_2 = input_0
primals_3 = input_1
primals_1 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Hui-Li/MCRec_PyTorch | MetaPathAttention | false | 17,401 | ["MIT"] | 9 | da4da77d2cade40c0a1961481c8e47ac396d12ee | https://github.com/Hui-Li/MCRec_PyTorch/tree/da4da77d2cade40c0a1961481c8e47ac396d12ee |
DummyEmbedder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/dp/cdpz7t32iljpldu47tqh7pmuwdqxergqgv4a2rt5ogv5nqrz2xtq.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat_addmm
# Graph fragment:
# %cat_addmm : [num_users=2] = call_function[target=torch._inductor.fx_passes.post_grad.cat_addmm](args = (((%primals_3, %unsqueeze, %permute), (%primals_5, %unsqueeze_1, %permute_1), (%primals_7, %unsqueeze_2, %permute_2), (%primals_9, %unsqueeze_3, %permute_3)), 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wj/cwjsobbw2c3kohxjc4m6imts234a6dg2mfzfiprqovzseczzu6sx.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat_addmm
# Graph fragment:
# %cat_addmm : [num_users=2] = call_function[target=torch._inductor.fx_passes.post_grad.cat_addmm](args = (((%primals_3, %unsqueeze, %permute), (%primals_5, %unsqueeze_1, %permute_1), (%primals_7, %unsqueeze_2, %permute_2), (%primals_9, %unsqueeze_3, %permute_3)), 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xe/cxe3uj36rtgooeqyq5yxarm6zaqyv36ttmwy7bjgudssek7wwwsk.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat_addmm
# Graph fragment:
# %cat_addmm : [num_users=2] = call_function[target=torch._inductor.fx_passes.post_grad.cat_addmm](args = (((%primals_3, %unsqueeze, %permute), (%primals_5, %unsqueeze_1, %permute_1), (%primals_7, %unsqueeze_2, %permute_2), (%primals_9, %unsqueeze_3, %permute_3)), 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/nt/cnt7vjnxjdogsjhnnje3hx65z4rqpzprlfcs2su6dp7hed64ag7w.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat_addmm
# Graph fragment:
# %cat_addmm : [num_users=2] = call_function[target=torch._inductor.fx_passes.post_grad.cat_addmm](args = (((%primals_3, %unsqueeze, %permute), (%primals_5, %unsqueeze_1, %permute_1), (%primals_7, %unsqueeze_2, %permute_2), (%primals_9, %unsqueeze_3, %permute_3)), 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
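# Reference sketch (hypothetical helper): the four cat_* kernels above each
# copy one column of the (4, 4) temporal-feature matrix into its own (4, 1)
# buffer, i.e. the eager temporal_features[:, i].unsqueeze(1) slices fed to
# the four embedding layers.
def _reference_split_columns(temporal_features):
    return [temporal_features[:, i:i + 1].contiguous() for i in range(4)]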
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 1), (1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 16), (16, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
buf1 = reinterpret_tensor(buf8, (4, 4), (16, 1), 0) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
extern_kernels.addmm(primals_3, buf0, reinterpret_tensor(primals_2, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = reinterpret_tensor(buf8, (4, 4), (16, 1), 4) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3)
del primals_4
del primals_5
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_1, buf4, 4, grid=grid(4), stream=stream0)
buf5 = reinterpret_tensor(buf8, (4, 4), (16, 1), 8) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf5)
del primals_6
del primals_7
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(primals_1, buf6, 4, grid=grid(4), stream=stream0)
buf7 = reinterpret_tensor(buf8, (4, 4), (16, 1), 12) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
extern_kernels.addmm(primals_9, buf6, reinterpret_tensor(primals_8, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf7)
del buf6
del primals_8
del primals_9
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [temporal_embeddings], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf8, reinterpret_tensor(primals_10, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf9)
del primals_11
return (buf9, reinterpret_tensor(primals_1, (4, 1), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (4, 1), 1), reinterpret_tensor(primals_1, (4, 1), (4, 1), 2), reinterpret_tensor(primals_1, (4, 1), (4, 1), 3), buf8, primals_10, )
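# Note: buf1/buf3/buf5/buf7 are aliased views into buf8 at column offsets
# 0/4/8/12, so each addmm writes its embedding directly into the fused
# (4, 16) concat buffer and no separate torch.cat kernel is needed.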
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DummyEmbedder(nn.Module):
def __init__(self, embedding_dim):
super().__init__()
self.embedding_dim = embedding_dim
self.day_embedding = nn.Linear(1, embedding_dim)
self.week_embedding = nn.Linear(1, embedding_dim)
self.month_embedding = nn.Linear(1, embedding_dim)
self.year_embedding = nn.Linear(1, embedding_dim)
self.dummy_fusion = nn.Linear(embedding_dim * 4, embedding_dim)
self.dropout = nn.Dropout(0.2)
def forward(self, temporal_features):
d, w, m, y = temporal_features[:, 0].unsqueeze(1), temporal_features[
:, 1].unsqueeze(1), temporal_features[:, 2].unsqueeze(1
), temporal_features[:, 3].unsqueeze(1)
d_emb, w_emb, m_emb, y_emb = self.day_embedding(d
), self.week_embedding(w), self.month_embedding(m
), self.year_embedding(y)
temporal_embeddings = self.dummy_fusion(torch.cat([d_emb, w_emb,
m_emb, y_emb], dim=1))
temporal_embeddings = self.dropout(temporal_embeddings)
return temporal_embeddings
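# Minimal usage sketch (not part of the original module): each row of the
# input holds (day, week, month, year) features.
def _example_embed():
    emb = DummyEmbedder(embedding_dim=4)
    return emb(torch.rand(4, 4))  # (4, 4) fused temporal embedding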
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embedding_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
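# call() writes each per-feature addmm result directly into a disjoint (4, 4)
# slice of buf8 (offsets 0, 4, 8, 12), so the torch.cat in the original model
# costs no extra copy before the final fusion addmm.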
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 1), (1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 16), (16, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(4)](primals_1, buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
buf1 = reinterpret_tensor(buf8, (4, 4), (16, 1), 0)
extern_kernels.addmm(primals_3, buf0, reinterpret_tensor(primals_2,
(1, 4), (1, 1), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = buf0
del buf0
triton_poi_fused_cat_1[grid(4)](primals_1, buf2, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf8, (4, 4), (16, 1), 4)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3)
del primals_4
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_cat_2[grid(4)](primals_1, buf4, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf8, (4, 4), (16, 1), 8)
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6,
(1, 4), (1, 1), 0), alpha=1, beta=1, out=buf5)
del primals_6
del primals_7
buf6 = buf4
del buf4
triton_poi_fused_cat_3[grid(4)](primals_1, buf6, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf8, (4, 4), (16, 1), 12)
extern_kernels.addmm(primals_9, buf6, reinterpret_tensor(primals_8,
(1, 4), (1, 1), 0), alpha=1, beta=1, out=buf7)
del buf6
del primals_8
del primals_9
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, buf8, reinterpret_tensor(
primals_10, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf9)
del primals_11
return buf9, reinterpret_tensor(primals_1, (4, 1), (4, 1), 0
), reinterpret_tensor(primals_1, (4, 1), (4, 1), 1
), reinterpret_tensor(primals_1, (4, 1), (4, 1), 2
), reinterpret_tensor(primals_1, (4, 1), (4, 1), 3), buf8, primals_10
class DummyEmbedderNew(nn.Module):
def __init__(self, embedding_dim):
super().__init__()
self.embedding_dim = embedding_dim
self.day_embedding = nn.Linear(1, embedding_dim)
self.week_embedding = nn.Linear(1, embedding_dim)
self.month_embedding = nn.Linear(1, embedding_dim)
self.year_embedding = nn.Linear(1, embedding_dim)
self.dummy_fusion = nn.Linear(embedding_dim * 4, embedding_dim)
self.dropout = nn.Dropout(0.2)
def forward(self, input_0):
primals_2 = self.day_embedding.weight
primals_3 = self.day_embedding.bias
primals_4 = self.week_embedding.weight
primals_5 = self.week_embedding.bias
primals_6 = self.month_embedding.weight
primals_7 = self.month_embedding.bias
primals_8 = self.year_embedding.weight
primals_9 = self.year_embedding.bias
primals_10 = self.dummy_fusion.weight
primals_11 = self.dummy_fusion.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| HumaticsLAB/GTM-Transformer | DummyEmbedder | false | 17,402 | [
"MIT"
] | 7 | 94124d3246c7c22d8b952beeda53639a9ad170e3 | https://github.com/HumaticsLAB/GTM-Transformer/tree/94124d3246c7c22d8b952beeda53639a9ad170e3 |
InceptionAux | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/na/cnaaw5nwcvgrwoc4wqs5r4hv2rrebac633f2bdriaccfezuniltm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 98304
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zi/czihd6ui5q727j4nznhmxojx6s5zlzsrg4bya4fxyqr4kmvncdgo.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [5, 5], [3, 3]), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 512], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 25, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 400
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 20
x3 = (xindex // 20)
y4 = yindex
x5 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + ((3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (65 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (66 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (67 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (68 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (128 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (129 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (130 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (131 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (132 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (192 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (193 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (194 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (195 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (196 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (256 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (257 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (258 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (259 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (260 + (3*x2) + (192*x3) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
tmp49 = 0.04
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + (y0 + (4*x5) + (1600*y1)), tmp50, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/w5/cw5plj6zt63pio7ovt3onhixmzlvy32deenx2pdzl7f4dcvgmxvb.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 204800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xf/cxf6zb26osowdgr2yqptetseo223ylocukqd24x34c26hrwa7sfa.py
# Topologically Sorted Source Nodes: [x_3, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.mean]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# x_5 => mean
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_1, [-1, -2], True), kwargs = {})
triton_red_fused_convolution_mean_relu_3 = async_compile.triton('triton_red_fused_convolution_mean_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[8192, 128],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_convolution_mean_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_convolution_mean_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 6144
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 768
x1 = (xindex // 768)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
_tmp6 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (768*r2) + (98304*x1)), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = _tmp6 + tmp5
_tmp6 = tl.where(rmask, tmp7, _tmp6)
tmp6 = tl.sum(_tmp6, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zn/cznb3cdjxtocdcmfuntbvsfqlssml53ze4ugzvro2psnbdasdwgz.py
# Topologically Sorted Source Nodes: [x_3, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.mean]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# x_5 => mean
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_1, [-1, -2], True), kwargs = {})
triton_per_fused_convolution_mean_relu_4 = async_compile.triton('triton_per_fused_convolution_mean_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4096, 2],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_mean_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_mean_relu_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 3072
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 768
x1 = (xindex // 768)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (768*r2) + (1536*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 256.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hm/chmpmln453d7hdva2ibnkimgfhdlz5pls7gnimhvm2z3yyjhfhar.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 768
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_2, (128, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (128, ), (1, ))
assert_size_stride(primals_4, (768, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_5, (768, ), (1, ))
assert_size_stride(primals_6, (4, 768), (768, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((768, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 98304, 25, grid=grid(98304, 25), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((4, 4, 20, 20), (1600, 1, 80, 4), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(primals_1, buf1, 16, 400, grid=grid(16, 400), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 128, 20, 20), (51200, 1, 2560, 128))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf3, primals_3, 204800, grid=grid(204800), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 768, 16, 16), (196608, 1, 12288, 768))
buf5 = empty_strided_cuda((4, 768, 1, 1, 2), (1536, 1, 6144, 6144, 768), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.mean]
triton_red_fused_convolution_mean_relu_3.run(buf4, primals_5, buf5, 6144, 128, grid=grid(6144), stream=stream0)
buf6 = empty_strided_cuda((4, 768, 1, 1), (768, 1, 3072, 3072), torch.float32)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.mean]
triton_per_fused_convolution_mean_relu_4.run(buf7, buf5, 3072, 2, grid=grid(3072), stream=stream0)
del buf5
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (4, 768), (768, 1), 0), reinterpret_tensor(primals_6, (768, 4), (1, 768), 0), alpha=1, beta=1, out=buf8)
del primals_7
buf9 = empty_strided_cuda((4, 768, 16, 16), (196608, 1, 12288, 768), torch.bool)
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_5.run(buf4, primals_5, buf9, 786432, grid=grid(786432), stream=stream0)
del buf4
del primals_5
return (buf8, primals_2, buf0, buf1, buf3, reinterpret_tensor(buf7, (4, 768), (768, 1), 0), primals_6, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((768, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 768), (768, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
import torch.nn as nn
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAux, self).__init__()
self.conv0 = Conv2d(in_channels, 128, kernel_size=1)
self.conv1 = Conv2d(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
self.fc = nn.Linear(768, num_classes)
self.fc.stddev = 0.001
def forward(self, x):
x = F.avg_pool2d(x, kernel_size=5, stride=3)
x = self.conv0(x)
x = self.conv1(x)
x = F.adaptive_avg_pool2d(x, (1, 1))
x = torch.flatten(x, 1)
x = self.fc(x)
return x
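# Shape trace for the (4, 4, 64, 64) test input: avg_pool2d -> (4, 4, 20, 20),
# conv0 -> (4, 128, 20, 20), conv1 -> (4, 768, 16, 16), adaptive pool +
# flatten -> (4, 768), fc -> (4, 4).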
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 4, 'num_classes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
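# triton_poi_fused_0 repacks the (768, 128, 5, 5) conv1 weight from contiguous
# layout into strides (3200, 1, 640, 128), i.e. channel-innermost, matching
# the channels-last activations produced by the kernels below.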
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
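# triton_poi_fused_avg_pool2d_1 computes F.avg_pool2d(x, kernel_size=5,
# stride=3): each program loads the 25 elements of a 5x5 window (row stride
# 64, window stride 3) and scales the sum by 0.04 = 1/25, writing the result
# in channels-last layout.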
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 400
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 20
x3 = xindex // 20
y4 = yindex
x5 = xindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (3 * x2 + 192 * x3 + 4096 * y4), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (65 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (66 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (67 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (68 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (128 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (129 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (130 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (131 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (132 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (192 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (193 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (194 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (195 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (196 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (256 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (257 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (258 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (259 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (260 + 3 * x2 + 192 * x3 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
tmp49 = 0.04
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + (y0 + 4 * x5 + 1600 * y1), tmp50, xmask & ymask)
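# Fused bias add + ReLU applied in place to the output of the 1x1 convolution
# (conv0); x0 indexes the 128 output channels.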
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
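# First stage of the fused conv1 bias + ReLU + global mean: each program adds
# the bias, applies ReLU, and reduces 128 of the 256 spatial positions, so
# buf5 holds two partial sums per (batch, channel) pair.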
@triton.jit
def triton_red_fused_convolution_mean_relu_3(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 768
x1 = xindex // 768
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
_tmp6 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 768 * r2 + 98304 * x1), rmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = _tmp6 + tmp5
_tmp6 = tl.where(rmask, tmp7, _tmp6)
tmp6 = tl.sum(_tmp6, 1)[:, None]
tl.store(out_ptr0 + x3, tmp6, None)
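# Second stage: sums the two partial reductions and divides by 256.0 (the
# 16 * 16 spatial extent), completing adaptive_avg_pool2d to (1, 1).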
@triton.jit
def triton_per_fused_convolution_mean_relu_4(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 3072
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 768
x1 = xindex // 768
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 768 * r2 + 1536 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 256.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp6, xmask)
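# Recomputes the conv1 bias + ReLU and stores the boolean mask (output <= 0)
# in buf9; the mask is returned for use in the ReLU backward pass.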
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 768
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
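# call() sequences the pass: weight repack, avg pool, conv0 via
# extern_kernels.convolution plus fused ReLU, conv1, two-stage global mean,
# and the final addmm for the fully connected classifier; buf9 carries the
# saved ReLU mask.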
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_2, (128, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (768, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_5, (768,), (1,))
assert_size_stride(primals_6, (4, 768), (768, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((768, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(98304, 25)](primals_4, buf0, 98304, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((4, 4, 20, 20), (1600, 1, 80, 4), torch.
float32)
triton_poi_fused_avg_pool2d_1[grid(16, 400)](primals_1, buf1, 16,
400, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 128, 20, 20), (51200, 1, 2560, 128))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_2[grid(204800)](buf3, primals_3,
204800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 768, 16, 16), (196608, 1, 12288, 768))
buf5 = empty_strided_cuda((4, 768, 1, 1, 2), (1536, 1, 6144, 6144,
768), torch.float32)
triton_red_fused_convolution_mean_relu_3[grid(6144)](buf4,
primals_5, buf5, 6144, 128, XBLOCK=64, RBLOCK=8, num_warps=4,
num_stages=1)
buf6 = empty_strided_cuda((4, 768, 1, 1), (768, 1, 3072, 3072),
torch.float32)
buf7 = buf6
del buf6
triton_per_fused_convolution_mean_relu_4[grid(3072)](buf7, buf5,
3072, 2, XBLOCK=128, num_warps=2, num_stages=1)
del buf5
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (4, 768),
(768, 1), 0), reinterpret_tensor(primals_6, (768, 4), (1, 768),
0), alpha=1, beta=1, out=buf8)
del primals_7
buf9 = empty_strided_cuda((4, 768, 16, 16), (196608, 1, 12288, 768),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(786432)](
buf4, primals_5, buf9, 786432, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf4
del primals_5
return buf8, primals_2, buf0, buf1, buf3, reinterpret_tensor(buf7, (4,
768), (768, 1), 0), primals_6, buf9
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class InceptionAuxNew(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAuxNew, self).__init__()
self.conv0 = Conv2d(in_channels, 128, kernel_size=1)
self.conv1 = Conv2d(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
self.fc = nn.Linear(768, num_classes)
self.fc.stddev = 0.001
def forward(self, input_0):
primals_2 = self.conv0.conv.weight
primals_3 = self.conv0.conv.bias
primals_4 = self.conv1.conv.weight
primals_5 = self.conv1.conv.bias
primals_6 = self.fc.weight
primals_7 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Hiroaki-Ozaki/modelib-classification | InceptionAux | false | 17,403 | [
"WTFPL"
] | 10 | 11077704cc0bc9a42fc4b94da60b57d31ff0f65c | https://github.com/Hiroaki-Ozaki/modelib-classification/tree/11077704cc0bc9a42fc4b94da60b57d31ff0f65c |
Gate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/zm/czm6acrrgjryz6xi3wza7npycjuiqsdsygpfdo3lbzaquecrmeuj.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/bc/cbclblxs22iq5iwnksvy2oyatex57njhowtq7an6p7eef7rnlqbj.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp4, tmp10, tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/mk/cmk3gycw3o64i55sjo4uruzoje7ubvbwaa25aojoct2uqzmpbt3f.py
# Topologically Sorted Source Nodes: [z, h_tilde, sub, mul_1, mul_2, h_new], Original ATen: [aten.sigmoid, aten.tanh, aten.rsub, aten.mul, aten.add]
# Source node to ATen node mapping:
# h_new => add
# h_tilde => tanh
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# z => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%addmm_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp6 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp3 * tmp4
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp5 + tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf2, primals_1, primals_2, buf3, 32, grid=grid(32), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [z, h_tilde, sub, mul_1, mul_2, h_new], Original ATen: [aten.sigmoid, aten.tanh, aten.rsub, aten.mul, aten.add]
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2.run(buf1, primals_2, buf4, buf5, 16, grid=grid(16), stream=stream0)
return (buf5, primals_1, primals_2, buf0, buf1, buf2, buf3, buf4, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Gate(nn.Module):
def __init__(self, dhid, dfeature, init_range=0.1, init_dist='uniform',
dropout=0.5):
super(Gate, self).__init__()
self.dhid = dhid
self.dfeature = dfeature
self.linear_z = nn.Linear(self.dhid + self.dfeature, self.dhid)
self.linear_r = nn.Linear(self.dhid + self.dfeature, self.dfeature)
self.linear_h_tilde = nn.Linear(self.dhid + self.dfeature, self.dhid)
self.drop = nn.Dropout(dropout)
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
self.init_weights(init_range, init_dist)
def init_weights(self, init_range, init_dist):
def init_w(data, init_dist):
if init_dist == 'uniform':
return data.uniform_(-init_range, init_range)
elif init_dist == 'xavier':
                return nn.init.xavier_uniform_(data)
init_w(self.linear_z.weight.data, init_dist)
init_w(self.linear_r.weight.data, init_dist)
init_w(self.linear_h_tilde.weight.data, init_dist)
self.linear_z.bias.data.fill_(0)
self.linear_r.bias.data.fill_(0)
self.linear_h_tilde.bias.data.fill_(0)
def forward(self, h, features):
z = self.sigmoid(self.linear_z(torch.cat((features, h), dim=1)))
r = self.sigmoid(self.linear_r(torch.cat((features, h), dim=1)))
h_tilde = self.tanh(self.linear_h_tilde(torch.cat((torch.mul(r,
features), h), dim=1)))
h_new = torch.mul(1 - z, h) + torch.mul(z, h_tilde)
h_new = self.drop(h_new)
return h_new
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dhid': 4, 'dfeature': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
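# Kernel 0 below concatenates the two (4, 4) inputs along dim=1 into a
# (4, 8) buffer: the first four columns come from in_ptr0 and the last four
# from in_ptr1, matching the torch.cat((..., ...), dim=1) calls in Gate.forward.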
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
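# Kernel 1 below fuses r = sigmoid(linear_r(...)) with the concatenation
# torch.cat((r * features, h), dim=1): the first four columns hold
# sigmoid(in_ptr0) * in_ptr1 and the last four are copied from in_ptr2.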
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp4, tmp10, tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
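# Kernel 2 below fuses the gate update h_new = (1 - sigmoid(z)) * h
# + sigmoid(z) * tanh(h_tilde_pre): in_ptr0 is the linear_z pre-activation,
# in_ptr1 the hidden state h, and in_ptr2 the linear_h_tilde pre-activation.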
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp3 * tmp4
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp5 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](buf2, primals_1, primals_2, buf3,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2[grid(16)](buf1,
primals_2, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
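    # Note: the eager Gate applies Dropout to h_new, but no dropout op appears
    # in this compiled graph (presumably traced in eval mode, where Dropout is
    # the identity).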
return buf5, primals_1, primals_2, buf0, buf1, buf2, buf3, buf4, primals_7
class GateNew(nn.Module):
def __init__(self, dhid, dfeature, init_range=0.1, init_dist='uniform',
dropout=0.5):
super(GateNew, self).__init__()
self.dhid = dhid
self.dfeature = dfeature
self.linear_z = nn.Linear(self.dhid + self.dfeature, self.dhid)
self.linear_r = nn.Linear(self.dhid + self.dfeature, self.dfeature)
self.linear_h_tilde = nn.Linear(self.dhid + self.dfeature, self.dhid)
self.drop = nn.Dropout(dropout)
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
self.init_weights(init_range, init_dist)
def init_weights(self, init_range, init_dist):
def init_w(data, init_dist):
if init_dist == 'uniform':
return data.uniform_(-init_range, init_range)
elif init_dist == 'xavier':
                return nn.init.xavier_uniform_(data)
init_w(self.linear_z.weight.data, init_dist)
init_w(self.linear_r.weight.data, init_dist)
init_w(self.linear_h_tilde.weight.data, init_dist)
self.linear_z.bias.data.fill_(0)
self.linear_r.bias.data.fill_(0)
self.linear_h_tilde.bias.data.fill_(0)
def forward(self, input_0, input_1):
primals_3 = self.linear_z.weight
primals_4 = self.linear_z.bias
primals_5 = self.linear_r.weight
primals_6 = self.linear_r.bias
primals_7 = self.linear_h_tilde.weight
primals_8 = self.linear_h_tilde.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| Hunter-DDM/DeFT-naacl2021 | Gate | false | 17,404 | [
"MIT"
] | 6 | c61aeb4f63a650a0a1b71fb1b0b245cb3925009b | https://github.com/Hunter-DDM/DeFT-naacl2021/tree/c61aeb4f63a650a0a1b71fb1b0b245cb3925009b |
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/om/comoiihvstixgtkpwua6qkwhcfg47ct372uimltlszxfbpfxya3t.py
# Topologically Sorted Source Nodes: [mean, var, add, std, sub, mul, add_1, truediv], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# mul => mul
# std => sqrt
# sub => sub
# truediv => div
# var => var
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 1, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-06), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x5 = (xindex // 4)
x3 = (xindex // 64)
x6 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask)
tmp2 = tl.load(in_ptr1 + (4*x5), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x5)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x5)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x5)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (x6 + (64*x3)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (16 + x6 + (64*x3)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (32 + x6 + (64*x3)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (48 + x6 + (64*x3)), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp19 / tmp9
tmp21 = tmp13 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp14 - tmp20
tmp24 = tmp23 * tmp23
tmp25 = tmp22 + tmp24
tmp26 = tmp16 - tmp20
tmp27 = tmp26 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp18 - tmp20
tmp30 = tmp29 * tmp29
tmp31 = tmp28 + tmp30
tmp32 = 3.0
tmp33 = tmp31 / tmp32
tmp34 = 1e-06
tmp35 = tmp33 + tmp34
tmp36 = libdevice.sqrt(tmp35)
tmp37 = tmp36 + tmp34
tmp38 = tmp12 / tmp37
tl.store(out_ptr0 + (x4), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/e2/ce2yvsltjpbbjmmuyom27m6u2wqfsmwcenk43luubofj4me6cm3r.py
# Topologically Sorted Source Nodes: [output, mul_1], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# output => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %unsqueeze), kwargs = {})
triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x3 = (xindex // 256)
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x6), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, var, add, std, sub, mul, add_1, truediv], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output, mul_1], Original ATen: [aten.add, aten.mul]
triton_poi_fused_add_mul_1.run(buf0, primals_3, primals_4, buf1, 1024, grid=grid(1024), stream=stream0)
del buf0
del primals_3
return (buf1, primals_1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class LayerNorm(torch.nn.Module):
def __init__(self, input_dim):
super(LayerNorm, self).__init__()
self.gamma = torch.nn.Parameter(torch.ones(input_dim))
self.beta = torch.nn.Parameter(torch.zeros(input_dim))
self.eps = 1e-06
def forward(self, x, mask):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=1, keepdim=True) + self.eps)
output = self.gamma * (x - mean) / (std + self.eps) + self.beta
return output * mask.unsqueeze(1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
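# Kernel 0 below fuses gamma * (x - mean) / (std + eps), with
# std = sqrt(var + eps): the mean is taken over the last dim (the four loads
# at stride 1) and the variance over dim=1 (the four loads at stride 16),
# dividing the squared deviations by 3.0 for Bessel's correction.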
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x5 = xindex // 4
x3 = xindex // 64
x6 = xindex % 16
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x5, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x5), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x5), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + (16 + x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (32 + x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (48 + x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp19 / tmp9
tmp21 = tmp13 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp14 - tmp20
tmp24 = tmp23 * tmp23
tmp25 = tmp22 + tmp24
tmp26 = tmp16 - tmp20
tmp27 = tmp26 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp18 - tmp20
tmp30 = tmp29 * tmp29
tmp31 = tmp28 + tmp30
tmp32 = 3.0
tmp33 = tmp31 / tmp32
tmp34 = 1e-06
tmp35 = tmp33 + tmp34
tmp36 = libdevice.sqrt(tmp35)
tmp37 = tmp36 + tmp34
tmp38 = tmp12 / tmp37
tl.store(out_ptr0 + x4, tmp38, xmask)
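# Kernel 1 below adds beta and multiplies by the broadcast mask, i.e.
# (normed + beta) * mask.unsqueeze(1), expanding to a (4, 4, 4, 4, 4) output.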
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x3 = xindex // 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x6, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_add_mul_1[grid(1024)](buf0, primals_3, primals_4,
buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_3
return buf1, primals_1, primals_4
class LayerNormNew(torch.nn.Module):
def __init__(self, input_dim):
super(LayerNormNew, self).__init__()
self.gamma = torch.nn.Parameter(torch.ones(input_dim))
self.beta = torch.nn.Parameter(torch.zeros(input_dim))
self.eps = 1e-06
def forward(self, input_0, input_1):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
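# A minimal usage sketch (not part of the original record; assumes CUDA):
#
#   ln = LayerNormNew(4).cuda()
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   mask = torch.rand(4, 4, 4, 4, device='cuda')
#   out = ln(x, mask)  # (4, 4, 4, 4, 4), broadcast against mask.unsqueeze(1)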
| IBM/context-relevant-pruning-textrl | LayerNorm | false | 17,405 | [
"Apache-2.0"
] | 8 | c8630203af5df64c8e1e3c4624e4a158b40a5f27 | https://github.com/IBM/context-relevant-pruning-textrl/tree/c8630203af5df64c8e1e3c4624e4a158b40a5f27 |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/mp/cmpx6hmbrf4737ppb24ygqj23qq73ty6qt26rubbrdbrjmi5andb.py
# Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# tanh => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x3 = (xindex // 256)
x5 = xindex % 64
x0 = xindex % 4
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(out_ptr0 + (x6), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4x/c4xd6y4gkp7z3srq6gzq52swaegpimvl35zpaduo4j5wyernpskh.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/mi/cmibf5zezxd6g5fvwgrxm77t4io4cybzrauehr6ghekpfqjr2jwl.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/a6/ca6zzxlve653y34okq7bwpco5be2dgqgwxyb46drau2k27kq5pr2.py
# Topologically Sorted Source Nodes: [mul, attention_weighted_encoding], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# attention_weighted_encoding => sum_2
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x1 = (xindex // 4) % 16
x3 = (xindex // 256)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (64*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (16 + x1 + (64*x3)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x1 + (64*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (48 + x1 + (64*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp0 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp0 * tmp9
tmp11 = tmp8 + tmp10
tl.store(out_ptr0 + (x5), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [att1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_tanh_0.run(buf0, buf1, primals_5, buf2, 1024, grid=grid(1024), stream=stream0)
del primals_5
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32)
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, attention_weighted_encoding], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf0, buf6, buf7, 1024, grid=grid(1024), stream=stream0)
return (buf7, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, buf6, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Attention(nn.Module):
def __init__(self, encoder_dim, decoder_dim, attention_dim):
super(Attention, self).__init__()
self.encoder_dim = encoder_dim
self.encoder_att = nn.Linear(encoder_dim, attention_dim)
self.decoder_att = nn.Linear(decoder_dim, attention_dim)
self.full_att = nn.Linear(attention_dim, 1)
self.tanh = nn.Tanh()
self.softmax = nn.Softmax(dim=1)
def forward(self, encoder_out, decoder_hidden):
att1 = self.encoder_att(encoder_out)
att2 = self.decoder_att(decoder_hidden).squeeze(0)
att = self.full_att(self.tanh(att1 + att2.unsqueeze(1))).squeeze(2)
alpha = self.softmax(att)
attention_weighted_encoding = (att1 * alpha.unsqueeze(2)).sum(dim=1)
return attention_weighted_encoding, alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_dim': 4, 'decoder_dim': 4, 'attention_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
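# Kernel 0 below folds the decoder_att bias add into
# att1 + att2.unsqueeze(1) followed by tanh, broadcasting the decoder
# projection over the encoder positions.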
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x3 = xindex // 256
x5 = xindex % 64
x0 = xindex % 4
x6 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(out_ptr0 + x6, tmp5, xmask)
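# Kernels 1 and 2 below implement a numerically stable softmax over dim=1 in
# two passes: subtract the per-slice max and exponentiate, then normalize by
# the sum of the exponentials.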
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
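# Kernel 3 below fuses (att1 * alpha.unsqueeze(2)).sum(dim=1), accumulating
# the four weighted terms along the attention dimension.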
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x1 = xindex // 4 % 16
x3 = xindex // 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (16 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + (48 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp0 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp0 * tmp9
tmp11 = tmp8 + tmp10
tl.store(out_ptr0 + x5, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(1024)](buf0, buf1, primals_5, buf2,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0)
del buf1
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_mul_sum_3[grid(1024)](buf0, buf6, buf7, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
return buf7, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), buf2, buf6, primals_7
class AttentionNew(nn.Module):
def __init__(self, encoder_dim, decoder_dim, attention_dim):
super(AttentionNew, self).__init__()
self.encoder_dim = encoder_dim
self.encoder_att = nn.Linear(encoder_dim, attention_dim)
self.decoder_att = nn.Linear(decoder_dim, attention_dim)
self.full_att = nn.Linear(attention_dim, 1)
self.tanh = nn.Tanh()
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0, input_1):
primals_1 = self.encoder_att.weight
primals_2 = self.encoder_att.bias
primals_4 = self.decoder_att.weight
primals_5 = self.decoder_att.bias
primals_7 = self.full_att.weight
primals_8 = self.full_att.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
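# A minimal usage sketch (not part of the original record; assumes CUDA):
#
#   att = AttentionNew(4, 4, 4).cuda()
#   enc = torch.rand(4, 4, 4, 4, device='cuda')
#   dec = torch.rand(4, 4, 4, 4, device='cuda')
#   weighted, alpha = att(enc, dec)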
| HumaticsLAB/AttentionBasedMultiModalRNN | Attention | false | 17,406 | [
"MIT"
] | 5 | 0c060a97cdddf1348938a5f2d456e83e5f8bf887 | https://github.com/HumaticsLAB/AttentionBasedMultiModalRNN/tree/0c060a97cdddf1348938a5f2d456e83e5f8bf887 |
InceptionA | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
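# The first four kernels below only repack tensors: kernel 0 reorders the
# (4, 4, 4, 4) NCHW input to channels-last, and kernels 1-3 do the same for
# the 5x5 and 3x3 convolution weights ahead of the convolutions that follow.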
# kernel path: runs/run_shard_2/inductor_cache/fb/cfbyh7temr2u5cam5txbekh4n77qsqs7bwxugnr6jx7oljt7yy43.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6o/c6o5p2y773cwf4c7kw3c5cqy57mrviqucndkzozwacfvvwlbzp6t.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 3072
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 48
y1 = (yindex // 48)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (48*x2) + (1200*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wl/cwlyygqmoh2tr4odo2ftoacxoqlew4n7mxufvlgaa5sgibpvlr5s.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 6144
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4x/c4xle4aqy37wz4bwzpqato3vjq4m7xf6qhielbefbj5qmpw3ynhc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 9216
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 96
y1 = (yindex // 96)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (96*x2) + (864*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/la/cla7n2w7ndaqgmsf3kuzfnnccx5ceeqifyzrdpn37kw3rh2fd4xw.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
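# Editorial sketch (hedged): the convolution itself is dispatched to
# extern_kernels.convolution with bias=None, so this kernel supplies the
# epilogue -- a per-channel bias add followed by ReLU, written back in
# place. An eager-mode equivalent, assuming conv_out is (N, C, H, W) and
# bias is (C,):
def _sketch_bias_relu(conv_out, bias):
    import torch
    # broadcast bias over N, H, W, then clamp negatives in place
    return torch.relu_(conv_out + bias.view(1, -1, 1, 1))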
# kernel path: runs/run_shard_2/inductor_cache/ep/cepcwbgwh7xxf4eou5aldywnzppvtxe5u52mlrfdm3pqazjqtrxq.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_6 => convolution_3
# x_7 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ia/ciayo5oxcdjizpa7tzktlzmyolsmkssbdozrc67awcuc6cdazqx5.py
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_8 => convolution_4
# x_9 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pk/cpkwbv7re4cfzhk3fqukjk6pyognhxrlfqa3mvhbku2wyrp5ifaw.py
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# branch_pool => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_3, [3, 3], [1, 1], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_7 = async_compile.triton('triton_poi_fused_avg_pool2d_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 4
x1 = (xindex // 4) % 4
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-20) + x6), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-16) + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-12) + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-4) + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x6), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (4 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (12 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (16 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (20 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + ((-1)*x1) + ((-1)*x2) + (x1*x2) + (((5) * ((5) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (5)))*((5) * ((5) <= (2 + x2)) + (2 + x2) * ((2 + x2) < (5)))) + ((-1)*x1*((5) * ((5) <= (2 + x2)) + (2 + x2) * ((2 + x2) < (5)))) + ((-1)*x2*((5) * ((5) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (5)))) + ((5) * ((5) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (5))) + ((5) * ((5) <= (2 + x2)) + (2 + x2) * ((2 + x2) < (5)))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + (x6), tmp53, xmask)
''', device_str='cuda')
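# Editorial sketch (hedged): the tmp52 divisor above factors as
#   (min(5, 2 + x1) + 1 - x1) * (min(5, 2 + x2) + 1 - x2),
# the per-axis pooling-window extent clamped to the padded input
# (H + padding = 4 + 1 = 5). For this 4x4 input every factor is 3, so each
# output position divides by 9, consistent with F.avg_pool2d's default
# count_include_pad=True semantics.
def _sketch_avg_pool_divisor():
    for x in range(4):
        assert min(5, 2 + x) + 1 - x == 3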
# kernel path: runs/run_shard_2/inductor_cache/n5/cn5zayrvdk5spnkqrduo7wse2rsvrtta6fzwjhpmdj2qyvwkeiwn.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# outputs => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_2, %relu_5, %relu_6], 1), kwargs = {})
triton_poi_fused_cat_8 = async_compile.triton('triton_poi_fused_cat_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 14592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 228
x0 = xindex % 16
x2 = (xindex // 3648)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((64*x0) + (1024*x2) + x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 128, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + ((64*x0) + (1024*x2) + ((-64) + x1)), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr3 + ((-64) + x1), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 224, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr4 + ((96*x0) + (1536*x2) + ((-128) + x1)), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr5 + ((-128) + x1), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp8, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tmp33 = tl.full([1], 228, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tl.load(in_ptr6 + ((4*x0) + (64*x2) + ((-224) + x1)), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr7 + ((-224) + x1), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp8, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + (x3), tmp43, xmask)
''', device_str='cuda')
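# Editorial sketch (hedged): the cat kernel fuses three things -- the
# deferred bias add and ReLU for each branch, and the channel
# concatenation (64 + 64 + 96 + 4 = 228) -- into a single pass that also
# converts the channels-last branch buffers back to an NCHW-contiguous
# output. An eager-mode equivalent:
def _sketch_fused_cat(branches_and_biases):
    import torch
    # branches_and_biases: iterable of ((N, C_i, H, W) tensor, (C_i,) bias)
    outs = [torch.relu(t + b.view(1, -1, 1, 1)) for t, b in branches_and_biases]
    return torch.cat(outs, dim=1)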
# kernel path: runs/run_shard_2/inductor_cache/5c/c5cw5vpyv7sid5lyiqp46oaknwlhnyvmmte2xrsfhmrx2lud3ya7.py
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_12 => convolution_6
# x_13 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
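# Editorial sketch (hedged): the *_threshold_backward_* kernels do not
# compute gradients. They precompute the boolean mask (activation <= 0)
# that autograd's threshold_backward later uses to zero gradients where
# ReLU clamped. An eager-mode equivalent:
def _sketch_relu_mask(conv_out, bias):
    import torch
    return torch.relu(conv_out + bias.view(1, -1, 1, 1)) <= 0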
# kernel path: runs/run_shard_2/inductor_cache/sm/csm5me3ubgsnwxdbt2rx75qszxsyezw5ifydlxqdsxujqnj7biqo.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_10 => convolution_5
# x_11 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/nx/cnx2bofnl7hywmjweolelizgiz2dh6mzkaqtrny2ehdwypjsczjx.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_4 => convolution_2
# x_5 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
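# Note: the 64-channel mask kernel above is launched twice in call() below,
# once for the 5x5 branch (buf7, primals_7) and once for the 1x1 branch
# (buf4, primals_2), since both produce (4, 64, 4, 4) activations.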
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (48, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (48, ), (1, ))
assert_size_stride(primals_6, (64, 48, 5, 5), (1200, 25, 5, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (96, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (96, ), (1, ))
assert_size_stride(primals_12, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_13, (96, ), (1, ))
assert_size_stride(primals_14, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_15, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((64, 48, 5, 5), (1200, 1, 240, 48), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 3072, 25, grid=grid(3072, 25), stream=stream0)
del primals_6
buf2 = empty_strided_cuda((96, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_10, buf2, 6144, 9, grid=grid(6144, 9), stream=stream0)
del primals_10
buf3 = empty_strided_cuda((96, 96, 3, 3), (864, 1, 288, 96), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_12, buf3, 9216, 9, grid=grid(9216, 9), stream=stream0)
del primals_12
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 1, 256, 64))
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 48, 4, 4), (768, 1, 192, 48))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf6, primals_5, 3072, grid=grid(3072), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, buf1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 4, 4), (1024, 1, 256, 64))
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf0, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 4, 4), (1024, 1, 256, 64))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf9, primals_9, 4096, grid=grid(4096), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 96, 4, 4), (1536, 1, 384, 96))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf11, primals_11, 6144, grid=grid(6144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 96, 4, 4), (1536, 1, 384, 96))
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_7.run(buf0, buf13, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4, 4), (64, 1, 16, 4))
buf15 = empty_strided_cuda((4, 228, 4, 4), (3648, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf4, primals_2, buf7, primals_7, buf12, primals_13, buf14, primals_15, buf15, 14592, grid=grid(14592), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool)
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_9.run(buf14, primals_15, buf16, 256, grid=grid(256), stream=stream0)
del buf14
del primals_15
buf17 = empty_strided_cuda((4, 96, 4, 4), (1536, 1, 384, 96), torch.bool)
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_10.run(buf12, primals_13, buf17, 6144, grid=grid(6144), stream=stream0)
del buf12
del primals_13
buf18 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch.bool)
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_11.run(buf7, primals_7, buf18, 4096, grid=grid(4096), stream=stream0)
del buf7
del primals_7
buf19 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_11.run(buf4, primals_2, buf19, 4096, grid=grid(4096), stream=stream0)
del buf4
del primals_2
return (buf15, primals_1, buf0, primals_4, buf1, primals_8, buf2, buf3, primals_14, buf6, buf9, buf11, buf13, buf16, buf17, buf18, buf19, )
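    # buf15 above is the forward output, shaped (4, 228, 4, 4); the remaining
    # entries are tensors saved for the backward pass: the reordered weights,
    # intermediate activations, and the ReLU masks buf16-buf19.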
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((48, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 48, 5, 5), (1200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((96, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
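# benchmark_compiled_module builds random inputs with the exact shapes and
# strides the graph was specialized on and times call() via
# print_performance, so running this file directly benchmarks the kernel
# set above end to end.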
| import torch
from torch.nn import functional as F
import torch.nn as nn
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features):
super(InceptionA, self).__init__()
self.branch1x1 = Conv2d(in_channels, 64, 1)
self.branch5x5_1 = Conv2d(in_channels, 48, 1)
self.branch5x5_2 = Conv2d(48, 64, 5, padding=2)
self.branch3x3dbl_1 = Conv2d(in_channels, 64, 1)
self.branch3x3dbl_2 = Conv2d(64, 96, 3, padding=1)
self.branch3x3dbl_3 = Conv2d(96, 96, 3, padding=1)
self.branch_pool = Conv2d(in_channels, pool_features, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
branches = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
outputs = torch.cat(branches, 1)
return outputs
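# Usage sketch (editorial, hedged): the four branches contribute
# 64 + 64 + 96 + pool_features channels, so with pool_features=4 the
# module maps (N, 4, H, W) to (N, 228, H, W):
#
#   m = InceptionA(in_channels=4, pool_features=4)
#   y = m(torch.rand(4, 4, 4, 4))
#   assert y.shape == (4, 228, 4, 4)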
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'pool_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 48
y1 = yindex // 48
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 48 * x2 + 1200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 96
y1 = yindex // 96
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 96 * x2 + 864 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 4
x1 = xindex // 4 % 4
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-20 + x6), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-16 + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-12 + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-4 + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (4 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (12 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (16 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (20 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
    tmp52 = (1 + -1 * x1 + -1 * x2 + x1 * x2
        + (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5))
        * (5 * (5 <= 2 + x2) + (2 + x2) * (2 + x2 < 5))
        + -1 * x1 * (5 * (5 <= 2 + x2) + (2 + x2) * (2 + x2 < 5))
        + -1 * x2 * (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5))
        + (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5))
        + (5 * (5 <= 2 + x2) + (2 + x2) * (2 + x2 < 5)))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x6, tmp53, xmask)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 14592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 228
x0 = xindex % 16
x2 = xindex // 3648
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * x0 + 1024 * x2 + x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 128, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + (64 * x0 + 1024 * x2 + (-64 + x1)), tmp15 &
xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr3 + (-64 + x1), tmp15 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 224, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr4 + (96 * x0 + 1536 * x2 + (-128 + x1)), tmp25 &
xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr5 + (-128 + x1), tmp25 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp8, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tl.full([1], 228, tl.int64)
tmp35 = tl.load(in_ptr6 + (4 * x0 + 64 * x2 + (-224 + x1)), tmp32 &
xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr7 + (-224 + x1), tmp32 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp8, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + x3, tmp43, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (48, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (48,), (1,))
assert_size_stride(primals_6, (64, 48, 5, 5), (1200, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (96, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (96,), (1,))
assert_size_stride(primals_12, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_13, (96,), (1,))
assert_size_stride(primals_14, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((64, 48, 5, 5), (1200, 1, 240, 48), torch
.float32)
triton_poi_fused_1[grid(3072, 25)](primals_6, buf1, 3072, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((96, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(6144, 9)](primals_10, buf2, 6144, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf3 = empty_strided_cuda((96, 96, 3, 3), (864, 1, 288, 96), torch.
float32)
triton_poi_fused_3[grid(9216, 9)](primals_12, buf3, 9216, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf4 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 1, 256, 64))
buf5 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 48, 4, 4), (768, 1, 192, 48))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_4[grid(3072)](buf6, primals_5,
3072, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf7 = extern_kernels.convolution(buf6, buf1, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 4, 4), (1024, 1, 256, 64))
buf8 = extern_kernels.convolution(buf0, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 4, 4), (1024, 1, 256, 64))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_5[grid(4096)](buf9, primals_9,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf10 = extern_kernels.convolution(buf9, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 96, 4, 4), (1536, 1, 384, 96))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_6[grid(6144)](buf11, primals_11,
6144, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf12 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 96, 4, 4), (1536, 1, 384, 96))
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_avg_pool2d_7[grid(256)](buf0, buf13, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4, 4), (64, 1, 16, 4))
buf15 = empty_strided_cuda((4, 228, 4, 4), (3648, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_8[grid(14592)](buf4, primals_2, buf7,
primals_7, buf12, primals_13, buf14, primals_15, buf15, 14592,
XBLOCK=128, num_warps=4, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(256)](buf14
, primals_15, buf16, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf14
del primals_15
buf17 = empty_strided_cuda((4, 96, 4, 4), (1536, 1, 384, 96), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_10[grid(6144)](
buf12, primals_13, buf17, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del buf12
del primals_13
buf18 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(4096)](
buf7, primals_7, buf18, 4096, XBLOCK=128, num_warps=4, num_stages=1
)
del buf7
del primals_7
buf19 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(4096)](
buf4, primals_2, buf19, 4096, XBLOCK=128, num_warps=4, num_stages=1
)
del buf4
del primals_2
return (buf15, primals_1, buf0, primals_4, buf1, primals_8, buf2, buf3,
primals_14, buf6, buf9, buf11, buf13, buf16, buf17, buf18, buf19)
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class InceptionANew(nn.Module):
def __init__(self, in_channels, pool_features):
super(InceptionANew, self).__init__()
self.branch1x1 = Conv2d(in_channels, 64, 1)
self.branch5x5_1 = Conv2d(in_channels, 48, 1)
self.branch5x5_2 = Conv2d(48, 64, 5, padding=2)
self.branch3x3dbl_1 = Conv2d(in_channels, 64, 1)
self.branch3x3dbl_2 = Conv2d(64, 96, 3, padding=1)
self.branch3x3dbl_3 = Conv2d(96, 96, 3, padding=1)
self.branch_pool = Conv2d(in_channels, pool_features, kernel_size=1)
def forward(self, input_0):
primals_1 = self.branch1x1.conv.weight
primals_2 = self.branch1x1.conv.bias
primals_4 = self.branch5x5_1.conv.weight
primals_5 = self.branch5x5_1.conv.bias
primals_6 = self.branch5x5_2.conv.weight
primals_7 = self.branch5x5_2.conv.bias
primals_8 = self.branch3x3dbl_1.conv.weight
primals_9 = self.branch3x3dbl_1.conv.bias
primals_10 = self.branch3x3dbl_2.conv.weight
primals_11 = self.branch3x3dbl_2.conv.bias
primals_12 = self.branch3x3dbl_3.conv.weight
primals_13 = self.branch3x3dbl_3.conv.bias
primals_14 = self.branch_pool.conv.weight
primals_15 = self.branch_pool.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
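# Usage sketch (editorial, hedged): InceptionANew is a drop-in replacement
# for the eager InceptionA above that routes forward() through the compiled
# call(); parameters and inputs must live on CUDA:
#
#   model = InceptionANew(in_channels=4, pool_features=4).cuda()
#   out = model(torch.rand(4, 4, 4, 4, device='cuda'))  # (4, 228, 4, 4)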
| Hiroaki-Ozaki/modelib-classification | InceptionA | false | 17,407 | ["WTFPL"] | 10 | 11077704cc0bc9a42fc4b94da60b57d31ff0f65c | https://github.com/Hiroaki-Ozaki/modelib-classification/tree/11077704cc0bc9a42fc4b94da60b57d31ff0f65c |
FeatLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ha/cha3rsc67sfv5ivttzvgdd3ui7g4thg6g7h5fngzlrpojpcw6c2f.py
# Topologically Sorted Source Nodes: [abs_1, eq, target, binary_cross_entropy_with_logits, loss, abs_2, eq_1, target_1, binary_cross_entropy_with_logits_1, loss_1, abs_3, eq_2, target_2, binary_cross_entropy_with_logits_2, loss_2, abs_4, eq_3, target_3, binary_cross_entropy_with_logits_3, loss_3, truediv], Original ATen: [aten.abs, aten.eq, aten._to_copy, aten.binary_cross_entropy_with_logits, aten.add, aten.div]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => abs_3
# abs_3 => abs_5
# abs_4 => abs_7
# binary_cross_entropy_with_logits => abs_2, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# binary_cross_entropy_with_logits_1 => abs_4, exp_1, full_default_1, log1p_1, mean_1, minimum_1, mul_1, neg_1, sub_3, sub_4, sub_5
# binary_cross_entropy_with_logits_2 => abs_6, exp_2, full_default_2, log1p_2, mean_2, minimum_2, mul_2, neg_2, sub_6, sub_7, sub_8
# binary_cross_entropy_with_logits_3 => abs_8, exp_3, full_default_3, log1p_3, mean_3, minimum_3, mul_3, neg_3, sub_10, sub_11, sub_9
# eq => eq
# eq_1 => eq_1
# eq_2 => eq_2
# eq_3 => eq_3
# loss => add
# loss_1 => add_1
# loss_2 => add_2
# loss_3 => add_3
# target => convert_element_type
# target_1 => convert_element_type_1
# target_2 => convert_element_type_2
# target_3 => convert_element_type_3
# truediv => div
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_1, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %select), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %select), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_2,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 0), kwargs = {})
# %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select_1,), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_3, 0), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq_1, torch.float32), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %select_1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum_1 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %select_1), kwargs = {})
# %abs_4 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select_1,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_4,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_1, %log1p_1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %sub_4), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_5,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mean_1), kwargs = {})
# %abs_5 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select_2,), kwargs = {})
# %eq_2 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_5, 0), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq_2, torch.float32), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %select_2), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum_2 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_2, %select_2), kwargs = {})
# %abs_6 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select_2,), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_6,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_2,), kwargs = {})
# %log1p_2 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_2, %log1p_2), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %sub_7), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_8,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mean_2), kwargs = {})
# %abs_7 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select_3,), kwargs = {})
# %eq_3 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_7, 0), kwargs = {})
# %convert_element_type_3 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq_3, torch.float32), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %select_3), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum_3 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_3, %select_3), kwargs = {})
# %abs_8 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select_3,), kwargs = {})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_8,), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_3,), kwargs = {})
# %log1p_3 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_3,), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_3, %log1p_3), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %sub_10), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_11,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mean_3), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_3, 4), kwargs = {})
triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_div_eq_0 = async_compile.triton('triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_div_eq_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_div_eq_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_div_eq_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp17 = tl.load(in_ptr0 + (256 + r0), None)
tmp32 = tl.load(in_ptr0 + (512 + r0), None)
tmp47 = tl.load(in_ptr0 + (768 + r0), None)
tmp1 = tl_math.abs(tmp0)
tmp2 = 0.0
tmp3 = tmp1 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp0
tmp8 = triton_helpers.minimum(tmp2, tmp0)
tmp9 = -tmp1
tmp10 = tl_math.exp(tmp9)
tmp11 = libdevice.log1p(tmp10)
tmp12 = tmp8 - tmp11
tmp13 = tmp7 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp18 = tl_math.abs(tmp17)
tmp19 = tmp18 == tmp2
tmp20 = tmp19.to(tl.float32)
tmp21 = tmp5 - tmp20
tmp22 = tmp21 * tmp17
tmp23 = triton_helpers.minimum(tmp2, tmp17)
tmp24 = -tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = libdevice.log1p(tmp25)
tmp27 = tmp23 - tmp26
tmp28 = tmp22 - tmp27
tmp29 = tl.broadcast_to(tmp28, [RBLOCK])
tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0))
tmp33 = tl_math.abs(tmp32)
tmp34 = tmp33 == tmp2
tmp35 = tmp34.to(tl.float32)
tmp36 = tmp5 - tmp35
tmp37 = tmp36 * tmp32
tmp38 = triton_helpers.minimum(tmp2, tmp32)
tmp39 = -tmp33
tmp40 = tl_math.exp(tmp39)
tmp41 = libdevice.log1p(tmp40)
tmp42 = tmp38 - tmp41
tmp43 = tmp37 - tmp42
tmp44 = tl.broadcast_to(tmp43, [RBLOCK])
tmp46 = triton_helpers.promote_to_tensor(tl.sum(tmp44, 0))
tmp48 = tl_math.abs(tmp47)
tmp49 = tmp48 == tmp2
tmp50 = tmp49.to(tl.float32)
tmp51 = tmp5 - tmp50
tmp52 = tmp51 * tmp47
tmp53 = triton_helpers.minimum(tmp2, tmp47)
tmp54 = -tmp48
tmp55 = tl_math.exp(tmp54)
tmp56 = libdevice.log1p(tmp55)
tmp57 = tmp53 - tmp56
tmp58 = tmp52 - tmp57
tmp59 = tl.broadcast_to(tmp58, [RBLOCK])
tmp61 = triton_helpers.promote_to_tensor(tl.sum(tmp59, 0))
tmp62 = 256.0
tmp63 = tmp16 / tmp62
tmp64 = tmp63 + tmp2
tmp65 = tmp31 / tmp62
tmp66 = tmp64 + tmp65
tmp67 = tmp46 / tmp62
tmp68 = tmp66 + tmp67
tmp69 = tmp61 / tmp62
tmp70 = tmp68 + tmp69
tmp71 = 0.25
tmp72 = tmp70 * tmp71
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp72, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [abs_1, eq, target, binary_cross_entropy_with_logits, loss, abs_2, eq_1, target_1, binary_cross_entropy_with_logits_1, loss_1, abs_3, eq_2, target_2, binary_cross_entropy_with_logits_2, loss_2, abs_4, eq_3, target_3, binary_cross_entropy_with_logits_3, loss_3, truediv], Original ATen: [aten.abs, aten.eq, aten._to_copy, aten.binary_cross_entropy_with_logits, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_div_eq_0.run(buf4, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
from sklearn import *
class FeatLoss(nn.Module):
"""
    This criterion is an implementation of Focal Loss, as proposed in
Focal Loss for Dense Object Detection.
Loss(x, class) = - \\alpha (1-softmax(x)[class])^gamma \\log(softmax(x)[class])
The losses are averaged across observations for each minibatch.
Args:
alpha(1D Tensor, Variable) : the scalar factor for this criterion
gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5),
putting more focus on hard, misclassified examples
size_average(bool): By default, the losses are averaged over observations for each minibatch.
However, if the field size_average is set to False, the losses are
instead summed for each minibatch.
"""
def __init__(self, margin=0.0, size_average=None, reduce=None,
reduction='mean'):
super(FeatLoss, self).__init__()
        self.loss = torch.nn.BCEWithLogitsLoss(weight=None,
            size_average=None, reduce=None, reduction='mean', pos_weight=None)
def forward(self, features_t):
loss = 0
for i, feat in enumerate(features_t):
_B, _C, _H, _W = feat.size()
target = (feat.abs() == 0).float()
loss += self.loss(feat, target)
return loss / len(features_t)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
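# Hedged usage sketch (added for illustration; not part of the original repo):
# FeatLoss scores each feature map with BCEWithLogitsLoss against the implicit
# target (feat.abs() == 0).float(), then averages over the list of features.
if __name__ == '__main__':
    criterion = FeatLoss()
    feats, = get_inputs()
    print(criterion(feats))  # scalar tensor: mean loss over the 4 slices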
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch.nn as nn
from sklearn import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_div_eq_0(
in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp17 = tl.load(in_ptr0 + (256 + r0), None)
tmp32 = tl.load(in_ptr0 + (512 + r0), None)
tmp47 = tl.load(in_ptr0 + (768 + r0), None)
tmp1 = tl_math.abs(tmp0)
tmp2 = 0.0
tmp3 = tmp1 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp0
tmp8 = triton_helpers.minimum(tmp2, tmp0)
tmp9 = -tmp1
tmp10 = tl_math.exp(tmp9)
tmp11 = libdevice.log1p(tmp10)
tmp12 = tmp8 - tmp11
tmp13 = tmp7 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp18 = tl_math.abs(tmp17)
tmp19 = tmp18 == tmp2
tmp20 = tmp19.to(tl.float32)
tmp21 = tmp5 - tmp20
tmp22 = tmp21 * tmp17
tmp23 = triton_helpers.minimum(tmp2, tmp17)
tmp24 = -tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = libdevice.log1p(tmp25)
tmp27 = tmp23 - tmp26
tmp28 = tmp22 - tmp27
tmp29 = tl.broadcast_to(tmp28, [RBLOCK])
tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0))
tmp33 = tl_math.abs(tmp32)
tmp34 = tmp33 == tmp2
tmp35 = tmp34.to(tl.float32)
tmp36 = tmp5 - tmp35
tmp37 = tmp36 * tmp32
tmp38 = triton_helpers.minimum(tmp2, tmp32)
tmp39 = -tmp33
tmp40 = tl_math.exp(tmp39)
tmp41 = libdevice.log1p(tmp40)
tmp42 = tmp38 - tmp41
tmp43 = tmp37 - tmp42
tmp44 = tl.broadcast_to(tmp43, [RBLOCK])
tmp46 = triton_helpers.promote_to_tensor(tl.sum(tmp44, 0))
tmp48 = tl_math.abs(tmp47)
tmp49 = tmp48 == tmp2
tmp50 = tmp49.to(tl.float32)
tmp51 = tmp5 - tmp50
tmp52 = tmp51 * tmp47
tmp53 = triton_helpers.minimum(tmp2, tmp47)
tmp54 = -tmp48
tmp55 = tl_math.exp(tmp54)
tmp56 = libdevice.log1p(tmp55)
tmp57 = tmp53 - tmp56
tmp58 = tmp52 - tmp57
tmp59 = tl.broadcast_to(tmp58, [RBLOCK])
tmp61 = triton_helpers.promote_to_tensor(tl.sum(tmp59, 0))
tmp62 = 256.0
tmp63 = tmp16 / tmp62
tmp64 = tmp63 + tmp2
tmp65 = tmp31 / tmp62
tmp66 = tmp64 + tmp65
tmp67 = tmp46 / tmp62
tmp68 = tmp66 + tmp67
tmp69 = tmp61 / tmp62
tmp70 = tmp68 + tmp69
tmp71 = 0.25
tmp72 = tmp70 * tmp71
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp72, None)
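# The fused kernel above evaluates BCEWithLogitsLoss in its numerically stable
# form: for logits x and target t = (|x| == 0), the per-element loss is
# (1 - t) * x - (min(0, x) - log1p(exp(-|x|))), which equals
# (1 - t) * x + log(1 + exp(-x)) without overflowing for large |x|. Each of
# the four 256-element slices is mean-reduced (sum / 256.0), and the four
# means are summed and scaled by 0.25, reproducing `loss / len(features_t)`
# from the eager module in a single reduction pass.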
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_div_eq_0[
grid(1)](buf4, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf4,
class FeatLossNew(nn.Module):
"""
    This criterion is an implementation of Focal Loss, as proposed in
Focal Loss for Dense Object Detection.
Loss(x, class) = - \\alpha (1-softmax(x)[class])^gamma \\log(softmax(x)[class])
The losses are averaged across observations for each minibatch.
Args:
alpha(1D Tensor, Variable) : the scalar factor for this criterion
gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5),
putting more focus on hard, misclassified examples
size_average(bool): By default, the losses are averaged over observations for each minibatch.
However, if the field size_average is set to False, the losses are
instead summed for each minibatch.
"""
def __init__(self, margin=0.0, size_average=None, reduce=None,
reduction='mean'):
super(FeatLossNew, self).__init__()
        self.loss = torch.nn.BCEWithLogitsLoss(weight=None,
            size_average=None, reduce=None, reduction='mean', pos_weight=None)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
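# Hedged smoke test for the compiled path (illustrative only; needs a CUDA
# device, since the generated kernel is compiled for 'cuda'):
# criterion = FeatLossNew()
# print(criterion(torch.rand([4, 4, 4, 4, 4], device='cuda')))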
| CityU-AIM-Group/SIGMA | FeatLoss | false | 17,408 | [
"MIT"
] | 5 | 19f89777db8d42f750a9b87756d3326c7efd18f5 | https://github.com/CityU-AIM-Group/SIGMA/tree/19f89777db8d42f750a9b87756d3326c7efd18f5 |
Network | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/kk/ckkqugljg2gshx5su7jnlmfoefqwpccahkhmu7yyep5j3dkgarrh.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3p/c3p4axzzzqbttj2mn6uwh6pylriwqcitydmmfmqz63p3js4tuw2i.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (6, 4), (4, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 6), (6, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 6), (6, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 6), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 6), (96, 24, 6, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 384, grid=grid(384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 6), (6, 1), 0), reinterpret_tensor(primals_4, (6, 1), (1, 6), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf4, 64, grid=grid(64), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 6), (6, 1), 0), buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 6), (6, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Network(nn.Module):
def __init__(self, input_dim):
super(Network, self).__init__()
self.first_layer = nn.Linear(input_dim, 6)
self.out_layer = nn.Linear(6, 1)
def forward(self, x):
out = self.first_layer(x)
out = F.relu(out)
out = self.out_layer(out)
out = F.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
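# Hedged usage sketch (added for illustration; not part of the original repo):
# the eager module maps (..., input_dim) to (..., 1) through two ReLU-capped
# linear layers applied over the last dimension.
if __name__ == '__main__':
    net = Network(input_dim=4)
    x, = get_inputs()
    print(net(x).shape)  # torch.Size([4, 4, 4, 1])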
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
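# Both fused kernels above add the linear-layer bias, apply ReLU in place, and
# additionally store the boolean mask (activation <= 0) that autograd's
# threshold_backward needs, so the forward value and the backward mask come
# out of a single memory pass over each buffer.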
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (6, 4), (4, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 6), (6, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 6), (6, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 6), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 6), (96, 24, 6, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(384)](buf1,
primals_2, buf5, 384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 6), (6, 1), 0),
reinterpret_tensor(primals_4, (6, 1), (1, 6), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf3,
primals_5, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 6), (6, 1), 0), buf4, primals_4, buf5
class NetworkNew(nn.Module):
def __init__(self, input_dim):
super(NetworkNew, self).__init__()
self.first_layer = nn.Linear(input_dim, 6)
self.out_layer = nn.Linear(6, 1)
def forward(self, input_0):
primals_1 = self.first_layer.weight
primals_2 = self.first_layer.bias
primals_4 = self.out_layer.weight
primals_5 = self.out_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| HyperScypion/KMS_Neural_Networks | Network | false | 17,409 | [
"MIT"
] | 6 | 71d0e9c6ee02ea7978ac8ab1b899290743afac7d | https://github.com/HyperScypion/KMS_Neural_Networks/tree/71d0e9c6ee02ea7978ac8ab1b899290743afac7d |
MetaPathEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/se/cselyq4rs6tehxsqjomgiul72uyxozfkucxjgbqwiwrareoksnv7.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/t6/ct66lng2mfhsx3dmmebjhaafi2vtb3lcx6zkrmnoxvsezjusotj4.py
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_2, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (16 + y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/v7/cv7c2y4f5wyuznv5kwugqtbyqia66jg2b52ur5k27dlhokvgi4j4.py
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_4, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (32 + y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/r6/cr6rtx7miwqu55elazfj6nvwxmshmjzz6cmcj3fkf4y55dxzp547.py
# Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# output_4 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %relu_2], 2), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 4, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.load(in_ptr0 + ((4*x1) + x0), tmp7 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + (x0), tmp7 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp7, tmp12, tmp13)
tmp15 = tmp0 >= tmp5
tmp16 = tmp15 & tmp4
tmp17 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + ((-4) + x0), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = triton_helpers.maximum(tmp11, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp16, tmp20, tmp21)
tmp23 = tl.where(tmp6, tmp14, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp4, tmp23, tmp24)
tmp26 = tmp0 >= tmp3
tmp27 = tl.full([1], 12, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tl.load(in_ptr3 + ((4*x1) + ((-8) + x0)), tmp26 & xmask, eviction_policy='evict_last', other=0.0)
tmp30 = tl.load(in_ptr1 + ((-8) + x0), tmp26 & xmask, eviction_policy='evict_last', other=0.0)
tmp31 = tmp29 + tmp30
tmp32 = triton_helpers.maximum(tmp11, tmp31)
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp26, tmp32, tmp33)
tmp35 = tl.where(tmp4, tmp25, tmp34)
tl.store(out_ptr0 + (x0 + (16*x1)), tmp35, xmask)
''', device_str='cuda')
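# triton_poi_fused_cat_3 fills the first 12 of 16 channels of the pooling
# buffer: for each of three conv outputs (4 channels apiece, in_ptr0/2/3) it
# fuses the bias add (in_ptr1) and ReLU, then writes them side by side,
# realizing the eager code's torch.cat((output, tmp_output), 2) without
# materializing the intermediate activations.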
# kernel path: runs/run_shard_2/inductor_cache/oq/coqgllnm2jd7zzomhku5m5bndveuipwx322cdasof5wjm3ycdum6.py
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_6, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (48 + y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/j2/cj2acwxuobbnsqqpakov5qzhofrsrmqecoipyuijlnjijuzuynz3.py
# Topologically Sorted Source Nodes: [output_1, tmp_output_1, tmp_output_4, tmp_output_7], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output_1 => relu
# tmp_output_1 => relu_1
# tmp_output_4 => relu_2
# tmp_output_7 => relu_3
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%permute_1,), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%permute_3,), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%permute_5,), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%permute_7,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: '*i1', 8: '*i1', 9: '*i1', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (x2), xmask)
tmp11 = tl.load(in_ptr3 + (x2), xmask)
tmp15 = tl.load(in_ptr4 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp10 = tmp9 <= tmp5
tmp12 = tmp11 + tmp1
tmp13 = triton_helpers.maximum(tmp3, tmp12)
tmp14 = tmp13 <= tmp5
tmp16 = tmp15 + tmp1
tmp17 = triton_helpers.maximum(tmp3, tmp16)
tmp18 = tmp17 <= tmp5
tl.store(out_ptr0 + (x0 + (16*x1)), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp6, xmask)
tl.store(out_ptr2 + (x2), tmp10, xmask)
tl.store(out_ptr3 + (x2), tmp14, xmask)
tl.store(out_ptr4 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hh/chh5xhwb4naillthtxnxu72t76wsj2ybhmgj2agqig4j4acknbxj.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 1), kwargs = {})
triton_poi_fused_max_6 = async_compile.triton('triton_poi_fused_max_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tmp0 > tmp1
tmp8 = tmp0 == tmp1
tmp9 = tmp0 != tmp0
tmp10 = tmp1 != tmp1
tmp11 = tmp9 > tmp10
tmp12 = tmp7 | tmp11
tmp13 = tmp9 & tmp10
tmp14 = tmp8 | tmp13
tmp15 = tl.full([1], 0, tl.int64)
tmp16 = tl.full([1], 1, tl.int64)
tmp17 = tmp15 < tmp16
tmp18 = tmp14 & tmp17
tmp19 = tmp12 | tmp18
tmp20 = tl.where(tmp19, tmp0, tmp1)
tmp21 = tl.where(tmp19, tmp15, tmp16)
tmp22 = tmp20 > tmp3
tmp23 = tmp20 == tmp3
tmp24 = tmp20 != tmp20
tmp25 = tmp3 != tmp3
tmp26 = tmp24 > tmp25
tmp27 = tmp22 | tmp26
tmp28 = tmp24 & tmp25
tmp29 = tmp23 | tmp28
tmp30 = tl.full([1], 2, tl.int64)
tmp31 = tmp21 < tmp30
tmp32 = tmp29 & tmp31
tmp33 = tmp27 | tmp32
tmp34 = tl.where(tmp33, tmp20, tmp3)
tmp35 = tl.where(tmp33, tmp21, tmp30)
tmp36 = tmp34 > tmp5
tmp37 = tmp34 == tmp5
tmp38 = tmp34 != tmp34
tmp39 = tmp5 != tmp5
tmp40 = tmp38 > tmp39
tmp41 = tmp36 | tmp40
tmp42 = tmp38 & tmp39
tmp43 = tmp37 | tmp42
tmp44 = tl.full([1], 3, tl.int64)
tmp45 = tmp35 < tmp44
tmp46 = tmp43 & tmp45
tmp47 = tmp41 | tmp46
tmp48 = tl.where(tmp47, tmp34, tmp5)
tmp49 = tl.where(tmp47, tmp35, tmp44)
tl.store(out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr1 + (x2), tmp49, xmask)
''', device_str='cuda')
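# triton_poi_fused_max_6 reduces over the path dimension (4 entries) with a
# chain of pairwise comparisons; the self-inequality checks (tmp9 = tmp0 !=
# tmp0, etc.) detect NaNs so tie-breaking and NaN propagation match torch.max,
# and out_ptr1 records the winning index (the argmax torch.max also returns).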
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(primals_1, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 1), (4, 1, 1))
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(primals_1, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1), (4, 1, 1))
buf10 = reinterpret_tensor(buf4, (4, 1, 16), (16, 16, 1), 0); del buf4 # reuse
buf6 = reinterpret_tensor(buf10, (4, 1, 12), (16, 16, 1), 0) # alias
# Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf1, primals_3, buf3, buf5, buf6, 48, grid=grid(48), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_4.run(primals_1, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 1), (4, 1, 1))
del buf7
buf9 = reinterpret_tensor(buf10, (4, 1, 4), (16, 16, 1), 12) # alias
buf13 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
buf14 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
buf15 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
buf16 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_1, tmp_output_1, tmp_output_4, tmp_output_7], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf8, primals_3, buf5, buf3, buf1, buf9, buf13, buf14, buf15, buf16, 16, grid=grid(16), stream=stream0)
del buf1
del buf3
del buf5
del primals_3
buf11 = reinterpret_tensor(buf8, (4, 1, 4), (4, 4, 1), 0); del buf8 # reuse
buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
triton_poi_fused_max_6.run(buf10, buf11, buf12, 16, grid=grid(16), stream=stream0)
del buf10
del buf6
del buf9
return (buf11, primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (64, 1, 4), 0), reinterpret_tensor(primals_1, (4, 4, 4), (64, 1, 4), 16), reinterpret_tensor(primals_1, (4, 4, 4), (64, 1, 4), 32), reinterpret_tensor(primals_1, (4, 4, 4), (64, 1, 4), 48), buf12, buf13, buf14, buf15, buf16, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MetaPathEmbedding(nn.Module):
def __init__(self, path_num, hop_num, feature_size, latent_dim):
super(MetaPathEmbedding, self).__init__()
self.path_num = path_num
self.hop_num = hop_num
self.feature_size = feature_size
self.latent_dim = latent_dim
self.lam = lambda x, index: x[:, index, :, :]
if hop_num == 3:
kernel_size = 3
elif hop_num == 4:
kernel_size = 4
else:
            raise Exception(
                'Only 3-hop or 4-hop metapaths are supported, got hop %d' %
                hop_num)
        self.conv1D = nn.Conv1d(in_channels=self.feature_size,
            out_channels=self.latent_dim, kernel_size=kernel_size,
            stride=1, padding=0)
nn.init.xavier_uniform_(self.conv1D.weight.data)
self.dropout = nn.Dropout(p=0.5)
def forward(self, input):
        input = input.view((-1, self.path_num, self.hop_num,
            self.feature_size))
path_input = self.lam(input, 0)
path_input = path_input.permute(0, 2, 1)
output = self.conv1D(path_input).permute(0, 2, 1)
output = F.relu(output)
output = self.dropout(output)
for i in range(1, self.path_num):
path_input = self.lam(input, i)
path_input = path_input.permute(0, 2, 1)
tmp_output = self.conv1D(path_input).permute(0, 2, 1)
tmp_output = F.relu(tmp_output)
tmp_output = self.dropout(tmp_output)
output = torch.cat((output, tmp_output), 2)
output = output.view((-1, self.path_num, self.latent_dim))
output = torch.max(output, 1, keepdim=True)[0]
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'path_num': 4, 'hop_num': 4, 'feature_size': 4,
'latent_dim': 4}]
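# Hypothetical smoke test, not part of the original module: with dropout
# disabled via eval(), the forward pass should map a (B, path_num, hop_num,
# feature_size) batch to a (B, 1, latent_dim) max-pooled path embedding.
def _metapath_embedding_smoke_test():
    torch.manual_seed(0)
    model = MetaPathEmbedding(path_num=4, hop_num=4, feature_size=4,
        latent_dim=4)
    model.eval()  # make the forward pass deterministic
    out = model(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 1, 4)
    return out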
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
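    # Gathers path 0 of the (B, path_num, hop_num, feature) input and
    # transposes it to the (B, feature, hop) channels-first layout that
    # conv1d expects; kernels _1, _2 and _4 differ only in the path offset.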
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (16 + y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (32 + y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
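    # x0 spans the 12-wide concatenated output; the interval masks below
    # route each lane to one of three 4-wide relu(conv + bias) segments.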
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 4, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.load(in_ptr0 + (4 * x1 + x0), tmp7 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + x0, tmp7 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp7, tmp12, tmp13)
tmp15 = tmp0 >= tmp5
tmp16 = tmp15 & tmp4
tmp17 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (-4 + x0), tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = triton_helpers.maximum(tmp11, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp16, tmp20, tmp21)
tmp23 = tl.where(tmp6, tmp14, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp4, tmp23, tmp24)
tmp26 = tmp0 >= tmp3
tl.full([1], 12, tl.int64)
tmp29 = tl.load(in_ptr3 + (4 * x1 + (-8 + x0)), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp30 = tl.load(in_ptr1 + (-8 + x0), tmp26 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tmp29 + tmp30
tmp32 = triton_helpers.maximum(tmp11, tmp31)
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp26, tmp32, tmp33)
tmp35 = tl.where(tmp4, tmp25, tmp34)
tl.store(out_ptr0 + (x0 + 16 * x1), tmp35, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (48 + y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
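    # Fuses relu(conv + bias) for the last path directly into the cat
    # buffer (out_ptr0) and records the <= 0 masks of all four paths that
    # the backward pass of the in-place ReLU needs.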
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp11 = tl.load(in_ptr3 + x2, xmask)
tmp15 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp10 = tmp9 <= tmp5
tmp12 = tmp11 + tmp1
tmp13 = triton_helpers.maximum(tmp3, tmp12)
tmp14 = tmp13 <= tmp5
tmp16 = tmp15 + tmp1
tmp17 = triton_helpers.maximum(tmp3, tmp16)
tmp18 = tmp17 <= tmp5
tl.store(out_ptr0 + (x0 + 16 * x1), tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
tl.store(out_ptr2 + x2, tmp10, xmask)
tl.store(out_ptr3 + x2, tmp14, xmask)
tl.store(out_ptr4 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_max_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
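    # The comparison chain below reproduces torch.max's argmax semantics:
    # a running (value, index) pair is threaded through, ties keep the
    # smaller index, and the x != x tests let NaN values win so NaNs
    # propagate as in eager mode.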
tmp7 = tmp0 > tmp1
tmp8 = tmp0 == tmp1
tmp9 = tmp0 != tmp0
tmp10 = tmp1 != tmp1
tmp11 = tmp9 > tmp10
tmp12 = tmp7 | tmp11
tmp13 = tmp9 & tmp10
tmp14 = tmp8 | tmp13
tmp15 = tl.full([1], 0, tl.int64)
tmp16 = tl.full([1], 1, tl.int64)
tmp17 = tmp15 < tmp16
tmp18 = tmp14 & tmp17
tmp19 = tmp12 | tmp18
tmp20 = tl.where(tmp19, tmp0, tmp1)
tmp21 = tl.where(tmp19, tmp15, tmp16)
tmp22 = tmp20 > tmp3
tmp23 = tmp20 == tmp3
tmp24 = tmp20 != tmp20
tmp25 = tmp3 != tmp3
tmp26 = tmp24 > tmp25
tmp27 = tmp22 | tmp26
tmp28 = tmp24 & tmp25
tmp29 = tmp23 | tmp28
tmp30 = tl.full([1], 2, tl.int64)
tmp31 = tmp21 < tmp30
tmp32 = tmp29 & tmp31
tmp33 = tmp27 | tmp32
tmp34 = tl.where(tmp33, tmp20, tmp3)
tmp35 = tl.where(tmp33, tmp21, tmp30)
tmp36 = tmp34 > tmp5
tmp37 = tmp34 == tmp5
tmp38 = tmp34 != tmp34
tmp39 = tmp5 != tmp5
tmp40 = tmp38 > tmp39
tmp41 = tmp36 | tmp40
tmp42 = tmp38 & tmp39
tmp43 = tmp37 | tmp42
tmp44 = tl.full([1], 3, tl.int64)
tmp45 = tmp35 < tmp44
tmp46 = tmp43 & tmp45
tmp47 = tmp41 | tmp46
tl.where(tmp47, tmp34, tmp5)
tmp49 = tl.where(tmp47, tmp35, tmp44)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp49, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
buf2 = buf0
del buf0
triton_poi_fused_convolution_1[grid(16, 4)](primals_1, buf2, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 1), (4, 1, 1))
buf4 = buf2
del buf2
triton_poi_fused_convolution_2[grid(16, 4)](primals_1, buf4, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1), (4, 1, 1))
buf10 = reinterpret_tensor(buf4, (4, 1, 16), (16, 16, 1), 0)
del buf4
buf6 = reinterpret_tensor(buf10, (4, 1, 12), (16, 16, 1), 0)
triton_poi_fused_cat_3[grid(48)](buf1, primals_3, buf3, buf5, buf6,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_convolution_4[grid(16, 4)](primals_1, buf7, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 1), (4, 1, 1))
del buf7
buf9 = reinterpret_tensor(buf10, (4, 1, 4), (16, 16, 1), 12)
buf13 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
buf14 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
buf15 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
buf16 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(16)](buf8,
primals_3, buf5, buf3, buf1, buf9, buf13, buf14, buf15, buf16,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf3
del buf5
del primals_3
buf11 = reinterpret_tensor(buf8, (4, 1, 4), (4, 4, 1), 0)
del buf8
buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.int64)
triton_poi_fused_max_6[grid(16)](buf10, buf11, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf10
del buf6
del buf9
return buf11, primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (64,
1, 4), 0), reinterpret_tensor(primals_1, (4, 4, 4), (64, 1, 4), 16
), reinterpret_tensor(primals_1, (4, 4, 4), (64, 1, 4), 32
), reinterpret_tensor(primals_1, (4, 4, 4), (64, 1, 4), 48
), buf12, buf13, buf14, buf15, buf16
class MetaPathEmbeddingNew(nn.Module):
def __init__(self, path_num, hop_num, feature_size, latent_dim):
super(MetaPathEmbeddingNew, self).__init__()
self.path_num = path_num
self.hop_num = hop_num
self.feature_size = feature_size
self.latent_dim = latent_dim
self.lam = lambda x, index: x[:, index, :, :]
if hop_num == 3:
kernel_size = 3
elif hop_num == 4:
kernel_size = 4
else:
            raise Exception(
                'Only 3-hop or 4-hop metapaths are supported, got hop %d' %
                hop_num)
        self.conv1D = nn.Conv1d(in_channels=self.feature_size,
            out_channels=self.latent_dim, kernel_size=kernel_size,
            stride=1, padding=0)
nn.init.xavier_uniform_(self.conv1D.weight.data)
self.dropout = nn.Dropout(p=0.5)
def forward(self, input_0):
primals_2 = self.conv1D.weight
primals_3 = self.conv1D.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Hui-Li/MCRec_PyTorch | MetaPathEmbedding | false | 17,410 | [
"MIT"
] | 9 | da4da77d2cade40c0a1961481c8e47ac396d12ee | https://github.com/Hui-Li/MCRec_PyTorch/tree/da4da77d2cade40c0a1961481c8e47ac396d12ee |
layer_1_to_1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/he/cheo53e3z722urjtoz4zrjngleye2ydazod3rsg3kemohv6o36zz.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# output => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_1, [4], True), kwargs = {})
triton_poi_fused_sum_0 = async_compile.triton('triton_poi_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2) % 4
x2 = (xindex // 8)
x4 = xindex
tmp0 = x1 + (4*x0)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((16*x2) + (x1 + (4*x0))), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + (16*x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr0 + (1 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr0 + (2 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr0 + (3 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = 0.25
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp6, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tmp21 = tl.load(in_ptr0 + (4 + (16*x2) + (x1 + (4*x0))), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr0 + (4 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr0 + (5 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.load(in_ptr0 + (6 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tl.load(in_ptr0 + (7 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = tmp28 * tmp16
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp6, tmp29, tmp30)
tmp32 = tl.where(tmp4, tmp21, tmp31)
tmp33 = tmp20 + tmp32
tmp34 = tl.load(in_ptr0 + (8 + (16*x2) + (x1 + (4*x0))), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr0 + (8 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr0 + (9 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.load(in_ptr0 + (10 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tmp37 + tmp38
tmp40 = tl.load(in_ptr0 + (11 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp16
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp6, tmp42, tmp43)
tmp45 = tl.where(tmp4, tmp34, tmp44)
tmp46 = tmp33 + tmp45
tmp47 = tl.load(in_ptr0 + (12 + (16*x2) + (x1 + (4*x0))), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tl.load(in_ptr0 + (12 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp49 = tl.load(in_ptr0 + (13 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tmp48 + tmp49
tmp51 = tl.load(in_ptr0 + (14 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 + tmp51
tmp53 = tl.load(in_ptr0 + (15 + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp54 = tmp52 + tmp53
tmp55 = tmp54 * tmp16
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp6, tmp55, tmp56)
tmp58 = tl.where(tmp4, tmp47, tmp57)
tmp59 = tmp46 + tmp58
tl.store(out_ptr0 + (x4), tmp59, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rg/crgopbeuadmrbypnrsm5qjbskhwavrkvlgwlczediobn2wodl7ca.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# output_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %primals_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 1, 2), (2, 2, 1))
assert_size_stride(primals_3, (1, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 2, 1), (8, 32, 2, 1, 32), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_sum_0.run(primals_1, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 1, 16), (16, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, reinterpret_tensor(buf0, (1, 2, 16), (0, 1, 2), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(buf0, (1, 16, 2), (2, 2, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 1, 2), (2, 2, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
def contractions_1_to_1(inputs, dim, normalization='inf',
    normalization_val=1.0):
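    # Basis of the two linear permutation-equivariant maps on first-order
    # tensors: op1 is the identity and op2 broadcasts the total sum back
    # over the set axis, optionally normalized by the set size m = dim.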
sum_all = torch.sum(inputs, dim=2).unsqueeze(dim=2)
op1 = inputs
op2 = torch.cat([sum_all for d in range(dim)], dim=2)
if normalization is not None:
if normalization == 'inf':
op2 = op2 / dim
return [op1, op2]
class layer_1_to_1(nn.Module):
"""
    :param input_depth: D
    :param output_depth: S
    forward input: N x D x m tensor
    forward output: N x S x m tensor
"""
def __init__(self, input_depth, output_depth, normalization='inf',
normalization_val=1.0, device='cpu'):
super().__init__()
self.input_depth = input_depth
self.output_depth = output_depth
self.normalization = normalization
self.normalization_val = normalization_val
self.device = device
self.basis_dimension = 2
        self.coeffs = torch.nn.Parameter(torch.randn(self.input_depth,
            self.output_depth, self.basis_dimension) * np.sqrt(2.0) /
            (self.input_depth + self.output_depth), requires_grad=True)
self.bias = torch.nn.Parameter(torch.zeros(1, self.output_depth, 1))
def forward(self, inputs):
m = inputs.size(2)
        ops_out = contractions_1_to_1(inputs, m,
            normalization=self.normalization)
ops_out = torch.stack(ops_out, dim=2)
output = torch.einsum('dsb,ndbi->nsi', self.coeffs, ops_out)
output = output + self.bias
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_depth': 1, 'output_depth': 1}]
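# Hypothetical property check, not part of the original module: both basis
# operations commute with permutations of the set axis, so the layer as a
# whole should as well.
def _layer_1_to_1_equivariance_check():
    torch.manual_seed(0)
    layer = layer_1_to_1(input_depth=1, output_depth=1)
    x = torch.rand(4, 1, 4)
    perm = torch.randperm(4)
    assert torch.allclose(layer(x)[:, :, perm], layer(x[:, :, perm]),
        atol=1e-06)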
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 4
x2 = xindex // 8
x4 = xindex
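    # Fused stack + depth reduction: each (basis x0, position x1, batch x2)
    # lane accumulates, over the depth axis d, op1 = inputs (basis 0) or
    # op2 = (set-axis sum) / m recomputed inline (basis 1), producing the
    # (1, 2, 16) operand consumed by the bmm in call().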
tmp0 = x1 + 4 * x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 * x2 + (x1 + 4 * x0)), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + 16 * x2, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr0 + (1 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr0 + (2 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr0 + (3 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = 0.25
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp6, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tmp21 = tl.load(in_ptr0 + (4 + 16 * x2 + (x1 + 4 * x0)), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr0 + (4 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tl.load(in_ptr0 + (5 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.load(in_ptr0 + (6 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tl.load(in_ptr0 + (7 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = tmp28 * tmp16
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp6, tmp29, tmp30)
tmp32 = tl.where(tmp4, tmp21, tmp31)
tmp33 = tmp20 + tmp32
tmp34 = tl.load(in_ptr0 + (8 + 16 * x2 + (x1 + 4 * x0)), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr0 + (8 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tl.load(in_ptr0 + (9 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.load(in_ptr0 + (10 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp39 = tmp37 + tmp38
tmp40 = tl.load(in_ptr0 + (11 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp16
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp6, tmp42, tmp43)
tmp45 = tl.where(tmp4, tmp34, tmp44)
tmp46 = tmp33 + tmp45
tmp47 = tl.load(in_ptr0 + (12 + 16 * x2 + (x1 + 4 * x0)), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tl.load(in_ptr0 + (12 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp49 = tl.load(in_ptr0 + (13 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp50 = tmp48 + tmp49
tmp51 = tl.load(in_ptr0 + (14 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp52 = tmp50 + tmp51
tmp53 = tl.load(in_ptr0 + (15 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp54 = tmp52 + tmp53
tmp55 = tmp54 * tmp16
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp6, tmp55, tmp56)
tmp58 = tl.where(tmp4, tmp47, tmp57)
tmp59 = tmp46 + tmp58
tl.store(out_ptr0 + x4, tmp59, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 1, 2), (2, 2, 1))
assert_size_stride(primals_3, (1, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 2, 1), (8, 32, 2, 1, 32), torch
.float32)
get_raw_stream(0)
triton_poi_fused_sum_0[grid(32)](primals_1, buf0, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 1, 16), (16, 16, 1), torch.float32)
extern_kernels.bmm(primals_2, reinterpret_tensor(buf0, (1, 2, 16),
(0, 1, 2), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(16)](buf2, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (1, 16, 2), (2, 2, 1), 0)
def contractions_1_to_1(inputs, dim, normalization='inf',
    normalization_val=1.0):
sum_all = torch.sum(inputs, dim=2).unsqueeze(dim=2)
op1 = inputs
op2 = torch.cat([sum_all for d in range(dim)], dim=2)
if normalization is not None:
if normalization == 'inf':
op2 = op2 / dim
return [op1, op2]
class layer_1_to_1New(nn.Module):
"""
    :param input_depth: D
    :param output_depth: S
    forward input: N x D x m tensor
    forward output: N x S x m tensor
"""
def __init__(self, input_depth, output_depth, normalization='inf',
normalization_val=1.0, device='cpu'):
super().__init__()
self.input_depth = input_depth
self.output_depth = output_depth
self.normalization = normalization
self.normalization_val = normalization_val
self.device = device
self.basis_dimension = 2
        self.coeffs = torch.nn.Parameter(torch.randn(self.input_depth,
            self.output_depth, self.basis_dimension) * np.sqrt(2.0) /
            (self.input_depth + self.output_depth), requires_grad=True)
self.bias = torch.nn.Parameter(torch.zeros(1, self.output_depth, 1))
def forward(self, input_0):
primals_2 = self.coeffs
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| HyTruongSon/InvariantGraphNetworks-PyTorch | layer_1_to_1 | false | 17,411 | [
"Apache-2.0"
] | 7 | da9fdaa4f858d6fcae14b08a59d4b172a2aabaf8 | https://github.com/HyTruongSon/InvariantGraphNetworks-PyTorch/tree/da9fdaa4f858d6fcae14b08a59d4b172a2aabaf8 |
Asym_ReLU_Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/bc/cbcelejo5k5s7lrehccyyi5xnpfemn57rdfe3lvzmwm4kjhpdjfh.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, None)
tl.store(out_ptr0 + (x0), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (64, 64, 3, 1), (192, 3, 1, 1))
assert_size_stride(primals_2, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_3, (64, 64, 1, 3), (192, 3, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, buf3, 1048576, grid=grid(1048576), stream=stream0)
return (buf2, primals_1, primals_2, primals_3, buf0, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 64, 3, 1), (192, 3, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, 64, 1, 3), (192, 3, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Asym_ReLU_Block(nn.Module):
def __init__(self):
super(Asym_ReLU_Block, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=64, out_channels=64,
            kernel_size=(3, 1), stride=1, padding=(1, 0), bias=False)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64,
            kernel_size=(1, 3), stride=1, padding=(0, 1), bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu(self.conv2(self.conv1(x)))
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
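# Back-of-the-envelope sketch, not part of the original module: the
# 3x1 + 1x3 factorization keeps the 3x3 receptive field while carrying
# only two thirds of the weights of a single bias-free 3x3 convolution.
def _asym_param_counts():
    asym = sum(p.numel() for p in Asym_ReLU_Block().parameters())
    full = sum(p.numel() for p in nn.Conv2d(64, 64, kernel_size=3,
        padding=1, bias=False).parameters())
    return asym, full  # (24576, 36864)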
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, None)
tl.store(out_ptr0 + x0, tmp4, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (64, 64, 3, 1), (192, 3, 1, 1))
assert_size_stride(primals_2, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_3, (64, 64, 1, 3), (192, 3, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1048576)](buf2,
buf3, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, primals_3, buf0, buf3
class Asym_ReLU_BlockNew(nn.Module):
def __init__(self):
super(Asym_ReLU_BlockNew, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=64, out_channels=64,
            kernel_size=(3, 1), stride=1, padding=(1, 0), bias=False)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64,
            kernel_size=(1, 3), stride=1, padding=(0, 1), bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| HwangToeMat/Asym_VDSR | Asym_ReLU_Block | false | 17,412 | [
"MIT"
] | 4 | 598200f745434fc6e1bb46b6da7d6cf7b0fdaa50 | https://github.com/HwangToeMat/Asym_VDSR/tree/598200f745434fc6e1bb46b6da7d6cf7b0fdaa50 |
layer_2_to_1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ih/cihm6coi53vofng2hocvykxnx4jvtm7plixifq2etgn6rj4j2yqh.py
# Topologically Sorted Source Nodes: [sum_all], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_all => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [2, 3]), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
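    # Each program reduces one whole 4x4 slice: the sum over dims (2, 3)
    # of the (N, D, m, m) input tensor.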
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/nr/cnrpg5sjziixtj6pqkbjdd2s62mtqqnhr5lgg6dnudrr4eduqqwc.py
# Topologically Sorted Source Nodes: [ops_out], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# ops_out => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%diagonal, %div, %div_1, %div_2, %div_3], 2), kwargs = {})
triton_poi_fused_stack_1 = async_compile.triton('triton_poi_fused_stack_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
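    # Stride 5 = 4 + 1 walks the main diagonal of each flattened 4x4
    # slice, so this kernel extracts diag(A) for every (n, d) pair.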
tmp0 = tl.load(in_ptr0 + ((5*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0 + (20*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/rn/crnzhc6uaa4yo2b4ae2klnnfvmuvxw6s6ndd2i5b3onqmvtilenh.py
# Topologically Sorted Source Nodes: [op2, op2_1], Original ATen: [aten.cat, aten.div]
# Source node to ATen node mapping:
# op2 => clone
# op2_1 => div
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clone, 4), kwargs = {})
triton_poi_fused_cat_div_2 = async_compile.triton('triton_poi_fused_cat_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
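    # Offsets 0, 5, 10, 15 are the diagonal entries of one 4x4 slice, so
    # tmp8 below is the normalized trace, trace(A) / m with m = 4.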
tmp0 = tl.load(in_ptr0 + (16*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (5 + (16*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (10 + (16*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0 + (20*x1)), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qb/cqbpdwuv5zswwhvm2c4qlv5v5ikio6yvfgpb7suuy6onxb2ifpye.py
# Topologically Sorted Source Nodes: [sum_of_rows, op3], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# op3 => div_1
# sum_of_rows => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [3]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
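    # Four contiguous loads sum one row of a 4x4 slice; tmp8 is the
    # row-sum divided by m = 4.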
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0 + (20*x1)), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qp/cqpjezslw7oputnfmtmzhqbd6bzby6x7henhwvh7cyizzzga5c6h.py
# Topologically Sorted Source Nodes: [sum_of_cols, op4], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# op4 => div_2
# sum_of_cols => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [2]), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
triton_poi_fused_div_sum_4 = async_compile.triton('triton_poi_fused_div_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
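    # Four stride-4 loads sum one column of a 4x4 slice; tmp8 is the
    # column-sum divided by m = 4.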
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0 + (20*x1)), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7n/c7ndwrmoubvjtnh4jglhr3sgfyonme6jyls6lgfwkr3v6ar762dj.py
# Topologically Sorted Source Nodes: [op5, op5_1], Original ATen: [aten.cat, aten.div]
# Source node to ATen node mapping:
# op5 => cat
# op5_1 => div_3
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_2, %unsqueeze_2, %unsqueeze_2, %unsqueeze_2], 2), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cat, 16), kwargs = {})
triton_poi_fused_cat_div_5 = async_compile.triton('triton_poi_fused_cat_div_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr0 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = 0.0625
tmp24 = tmp22 * tmp23
tl.store(out_ptr0 + (x0 + (20*x1)), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vv/cvvfpmw7wav3skb2kvaxkixaror2ja7hi3zgnhfxeepqynslk47f.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# output => sum_5
# Graph fragment:
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_1, [4], True), kwargs = {})
triton_poi_fused_sum_6 = async_compile.triton('triton_poi_fused_sum_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 20
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 5
y1 = (yindex // 5)
tmp0 = tl.load(in_ptr0 + (x2 + (4*y0) + (80*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (20 + x2 + (4*y0) + (80*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (40 + x2 + (4*y0) + (80*y1)), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (60 + x2 + (4*y0) + (80*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (y0 + (5*x2) + (20*y1)), tmp6, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/dg/cdgomsaescygms3lw3xnzoa2dig3vw6chqvl2aqflbzgvevfmr43.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# output_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %primals_3), kwargs = {})
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 5), (5, 5, 1))
assert_size_stride(primals_3, (1, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_all], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(primals_1, buf0, 16, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 20), (80, 20, 1), torch.float32)
buf1 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 0) # alias
# Topologically Sorted Source Nodes: [ops_out], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(primals_1, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 4) # alias
# Topologically Sorted Source Nodes: [op2, op2_1], Original ATen: [aten.cat, aten.div]
triton_poi_fused_cat_div_2.run(primals_1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 8) # alias
# Topologically Sorted Source Nodes: [sum_of_rows, op3], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_3.run(primals_1, buf3, 64, grid=grid(64), stream=stream0)
buf4 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 12) # alias
# Topologically Sorted Source Nodes: [sum_of_cols, op4], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_4.run(primals_1, buf4, 64, grid=grid(64), stream=stream0)
del primals_1
buf5 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 16) # alias
# Topologically Sorted Source Nodes: [op5, op5_1], Original ATen: [aten.cat, aten.div]
triton_poi_fused_cat_div_5.run(buf0, buf5, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 1, 4, 5, 1), (20, 1, 5, 1, 5), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sum]
triton_poi_fused_sum_6.run(buf6, buf7, 20, 4, grid=grid(20, 4), stream=stream0)
del buf1
del buf2
del buf3
del buf4
del buf5
del buf6
buf8 = reinterpret_tensor(buf0, (1, 1, 16), (16, 16, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, reinterpret_tensor(buf7, (1, 5, 16), (0, 1, 5), 0), out=buf8)
del primals_2
buf9 = reinterpret_tensor(buf8, (4, 1, 4), (4, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf9, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf9, reinterpret_tensor(buf7, (1, 16, 5), (5, 5, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 1, 5), (5, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
def contractions_2_to_1(inputs, dim, normalization='inf', normalization_val=1.0):
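    # The five basis contractions of each m x m matrix to a length-m vector:
    # diagonal, trace (broadcast), row sums, column sums, and total sum
    # (broadcast); the last four are scaled by 1/dim or 1/dim**2 when
    # normalization == 'inf'.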
diag_part = torch.diagonal(inputs, dim1=2, dim2=3)
sum_diag_part = torch.sum(diag_part, dim=2).unsqueeze(dim=2)
sum_of_rows = torch.sum(inputs, dim=3)
sum_of_cols = torch.sum(inputs, dim=2)
sum_all = torch.sum(inputs, dim=(2, 3))
op1 = diag_part
op2 = torch.cat([sum_diag_part for d in range(dim)], dim=2)
op3 = sum_of_rows
op4 = sum_of_cols
op5 = torch.cat([sum_all.unsqueeze(dim=2) for d in range(dim)], dim=2)
if normalization is not None:
if normalization == 'inf':
op2 = op2 / dim
op3 = op3 / dim
op4 = op4 / dim
op5 = op5 / dim ** 2
return [op1, op2, op3, op4, op5]
class layer_2_to_1(nn.Module):
"""
:param name: name of layer
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m x m tensor
:return: output: N x S x m tensor
"""
def __init__(self, input_depth, output_depth, normalization='inf',
normalization_val=1.0, device='cpu'):
super().__init__()
self.input_depth = input_depth
self.output_depth = output_depth
self.normalization = normalization
self.normalization_val = normalization_val
self.device = device
self.basis_dimension = 5
        self.coeffs = torch.nn.Parameter(
            torch.randn(self.input_depth, self.output_depth, self.basis_dimension) *
            np.sqrt(2.0) / (self.input_depth + self.output_depth),
            requires_grad=True)
self.bias = torch.nn.Parameter(torch.zeros(1, self.output_depth, 1))
def forward(self, inputs):
m = inputs.size(3)
        ops_out = contractions_2_to_1(inputs, m, normalization=self.normalization)
ops_out = torch.stack(ops_out, dim=2)
output = torch.einsum('dsb,ndbi->nsi', self.coeffs, ops_out)
output = output + self.bias
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_depth': 1, 'output_depth': 1}]
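# Hypothetical usage sketch (not part of the original source), matching the
# shapes supplied by get_inputs() / get_init_inputs() above:
#   layer = layer_2_to_1(input_depth=1, output_depth=1)
#   out = layer(torch.rand(4, 4, 4, 4))  # -> tensor of shape (4, 1, 4)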
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
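# sum_all: each of the 16 (N, D) programs reduces a full 4x4 matrix to a scalar.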
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
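# op1 (diag_part): stride 5 = m + 1 walks the diagonal of each 4x4 matrix; the
# result lands in the first 4-column slot of the 20-wide stacked basis buffer.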
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (5 * x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x0 + 20 * x1), tmp0, xmask)
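# op2: the trace (diagonal entries at offsets 0, 5, 10, 15) divided by dim = 4
# and broadcast across all 4 positions of the second slot.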
@triton.jit
def triton_poi_fused_cat_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (15 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0 + 20 * x1), tmp8, xmask)
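# op3: row sums of each matrix (four contiguous loads) divided by dim = 4.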
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0 + 20 * x1), tmp8, xmask)
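# op4: column sums of each matrix (stride-4 loads) divided by dim = 4.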
@triton.jit
def triton_poi_fused_div_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0 + 20 * x1), tmp8, xmask)
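# op5: sum_all broadcast to all 4 positions (the concatenation of identical
# copies) and divided by dim ** 2 = 16.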
@triton.jit
def triton_poi_fused_cat_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr0 + x1, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = 0.0625
tmp24 = tmp22 * tmp23
tl.store(out_ptr0 + (x0 + 20 * x1), tmp24, xmask)
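# Reduction half of the einsum 'dsb,ndbi->nsi': sums the stacked (4, 4, 20)
# basis buffer over the depth axis and transposes the layout so the remaining
# contraction over the basis dimension can run as a single bmm.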
@triton.jit
def triton_poi_fused_sum_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 20
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 5
y1 = yindex // 5
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y0 + 80 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (20 + x2 + 4 * y0 + 80 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (40 + x2 + 4 * y0 + 80 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (60 + x2 + 4 * y0 + 80 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (y0 + 5 * x2 + 20 * y1), tmp6, xmask & ymask)
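# output_1: in-place broadcast-add of the scalar bias (shape (1, 1, 1)) over
# all 16 output elements.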
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
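# Entry point: the five contractions are written into aliased 4-column slices
# of a single (4, 4, 20) buffer (buf6), reduced over depth into buf7,
# contracted with the coefficients via bmm, and offset by the bias.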
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 5), (5, 5, 1))
assert_size_stride(primals_3, (1, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 20), (80, 20, 1), torch.float32)
buf1 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 0)
triton_poi_fused_stack_1[grid(64)](primals_1, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 4)
triton_poi_fused_cat_div_2[grid(64)](primals_1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 8)
triton_poi_fused_div_sum_3[grid(64)](primals_1, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 12)
triton_poi_fused_div_sum_4[grid(64)](primals_1, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf5 = reinterpret_tensor(buf6, (4, 4, 4), (80, 20, 1), 16)
triton_poi_fused_cat_div_5[grid(64)](buf0, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
        buf7 = empty_strided_cuda((4, 1, 4, 5, 1), (20, 1, 5, 1, 5), torch.float32)
triton_poi_fused_sum_6[grid(20, 4)](buf6, buf7, 20, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del buf1
del buf2
del buf3
del buf4
del buf5
del buf6
buf8 = reinterpret_tensor(buf0, (1, 1, 16), (16, 16, 1), 0)
del buf0
extern_kernels.bmm(primals_2, reinterpret_tensor(buf7, (1, 5, 16),
(0, 1, 5), 0), out=buf8)
del primals_2
buf9 = reinterpret_tensor(buf8, (4, 1, 4), (4, 4, 1), 0)
del buf8
triton_poi_fused_add_7[grid(16)](buf9, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
return buf9, reinterpret_tensor(buf7, (1, 16, 5), (5, 5, 1), 0)
def contractions_2_to_1(inputs, dim, normalization='inf', normalization_val=1.0):
diag_part = torch.diagonal(inputs, dim1=2, dim2=3)
sum_diag_part = torch.sum(diag_part, dim=2).unsqueeze(dim=2)
sum_of_rows = torch.sum(inputs, dim=3)
sum_of_cols = torch.sum(inputs, dim=2)
sum_all = torch.sum(inputs, dim=(2, 3))
op1 = diag_part
op2 = torch.cat([sum_diag_part for d in range(dim)], dim=2)
op3 = sum_of_rows
op4 = sum_of_cols
op5 = torch.cat([sum_all.unsqueeze(dim=2) for d in range(dim)], dim=2)
if normalization is not None:
if normalization == 'inf':
op2 = op2 / dim
op3 = op3 / dim
op4 = op4 / dim
op5 = op5 / dim ** 2
return [op1, op2, op3, op4, op5]
class layer_2_to_1New(nn.Module):
"""
:param name: name of layer
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m x m tensor
:return: output: N x S x m tensor
"""
def __init__(self, input_depth, output_depth, normalization='inf',
normalization_val=1.0, device='cpu'):
super().__init__()
self.input_depth = input_depth
self.output_depth = output_depth
self.normalization = normalization
self.normalization_val = normalization_val
self.device = device
self.basis_dimension = 5
        self.coeffs = torch.nn.Parameter(
            torch.randn(self.input_depth, self.output_depth, self.basis_dimension) *
            np.sqrt(2.0) / (self.input_depth + self.output_depth),
            requires_grad=True)
self.bias = torch.nn.Parameter(torch.zeros(1, self.output_depth, 1))
def forward(self, input_0):
primals_2 = self.coeffs
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| HyTruongSon/InvariantGraphNetworks-PyTorch | layer_2_to_1 | false | 17,413 | ["Apache-2.0"] | 7 | da9fdaa4f858d6fcae14b08a59d4b172a2aabaf8 | https://github.com/HyTruongSon/InvariantGraphNetworks-PyTorch/tree/da9fdaa4f858d6fcae14b08a59d4b172a2aabaf8 |
TransformerDecoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/no/cnon5ajue75qf7yiwbkruc77mekmrelvorvagtbhdc4kdbzwzdin.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
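# Note: the kernel above fuses the query bias add with the 1/sqrt(head_dim)
# attention scaling; head_dim is 1 here, so the scale factor is exactly 1.0.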
# kernel path: runs/run_shard_2/inductor_cache/a5/ca56rgpdjbilaspdaau44lnrilsxekhmcnbwzhtrcgfbilkik27p.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xl/cxls6dl5dz3ua4ilno7rjcfd6m7p4ydnd3mzfaq2cepnph6e2y7h.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
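# Note: the two kernels above implement a numerically stable softmax split
# across launches: _softmax_1 subtracts the per-row max before exponentiating,
# and _softmax_2 normalizes by the per-row sum of exponentials.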
# kernel path: runs/run_shard_2/inductor_cache/mf/cmfwwg65sbylkho5zzcgwfl3cw2tru7uwmgab3xaed7bzoxeyhoe.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
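# Note: the clone kernel above is a small transpose-copy that gathers the
# per-head attention outputs back into contiguous layout before the output
# projection.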
# kernel path: runs/run_shard_2/inductor_cache/xo/cxo7kebwccavmyl6qt2qy6dv6nxtxbcnmr6bte7ferqglr6pf5lx.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# multi_head_attention_forward => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_11, [1]), kwargs = {})
triton_poi_fused_mean_4 = async_compile.triton('triton_poi_fused_mean_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
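# Note: the mean kernel above averages the attention maps over the head
# dimension, producing the averaged attention weights that
# multi_head_attention_forward returns alongside the attention output.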
# kernel path: runs/run_shard_2/inductor_cache/a5/ca5u4qmkoeql76ogmh3jtan7rcyuzjntmkbi6vseh2aka4oj7x22.py
# Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt => add
# tgt_1 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
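# Note: the kernel above fuses the residual add (tgt = input + attention
# output) with the per-row mean and biased variance (correction=0) required by
# LayerNorm; the affine scale and shift are applied by the next kernel.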
# kernel path: runs/run_shard_2/inductor_cache/ud/cudlmttxldaqwjebx3x6ystcji3chwr3lkxk64maxy4khpdp3nfn.py
# Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt => add
# tgt_1 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_7), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_7), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_8), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/2l/c2lx4cm43t5ldlcrg54wtrimftmxqc5oaodvjlhhxk26wwohsh7x.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_10), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_7 = async_compile.triton('triton_poi_fused_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
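# Note: the kernel above fuses the bias add of the first feed-forward linear
# (hidden size 2048) with the ReLU activation, updating the buffer in place.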
# kernel path: runs/run_shard_2/inductor_cache/jr/cjrivzlgffmd4mdkydwzbkypvq43vw5lx7lx34d75oidkn5oukkt.py
# Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# tgt_2 => add_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_12), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/w7/cw7rcffzrlzzq6c736du6qnb5ytwzbniibkuycklm4tfjn6me2ee.py
# Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt_3 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qq/cqqkte7r3xbnvps7wbt5vgxcwukikbvlwemcbrcnle7woo3t3r3c.py
# Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt_3 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_9), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_13), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_14), kwargs = {})
triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (2048, 4), (4, 1))
assert_size_stride(primals_10, (2048, ), (1, ))
assert_size_stride(primals_11, (4, 2048), (2048, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_4, (4, ), (1, ), 4), primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_4, (4, ), (1, ), 8), primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_6
buf10 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mean]
triton_poi_fused_mean_4.run(buf6, buf10, 16, grid=grid(16), stream=stream0)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf12 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf9, buf11, buf12, 4, grid=grid(4), stream=stream0)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf9, buf11, buf12, primals_7, primals_8, buf13, 16, grid=grid(16), stream=stream0)
del primals_8
buf14 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf13, reinterpret_tensor(primals_9, (4, 2048), (1, 4), 0), out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
triton_poi_fused_relu_7.run(buf15, primals_10, 8192, grid=grid(8192), stream=stream0)
del primals_10
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf15, reinterpret_tensor(primals_11, (2048, 4), (1, 2048), 0), out=buf16)
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf17, buf13, primals_12, 16, grid=grid(16), stream=stream0)
del primals_12
buf18 = buf12; del buf12 # reuse
buf19 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf17, buf18, buf19, 4, grid=grid(4), stream=stream0)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_10.run(buf17, buf18, buf19, primals_13, primals_14, buf20, 16, grid=grid(16), stream=stream0)
del buf18
del buf19
del primals_14
return (buf20, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), primals_1, primals_7, primals_13, primals_2, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf13, buf15, buf17, primals_11, primals_9, primals_5, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super(TransformerDecoderLayer, self).__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout
=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = F.relu
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerDecoderLayer, self).__setstate__(state)
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None):
tgt2, attn_weights = self.multihead_attn(tgt, memory, memory)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt, attn_weights
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
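# Fused bias-add + attention scaling for the query projection. With d_model=4
# and nhead=4 the head dimension is 1, so the softmax scale 1/sqrt(head_dim)
# collapses to the literal 1.0 below.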
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
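# Softmax, pass 1: numerically stable exponentiation. Each row of length 4 is
# reduced to its max, which is subtracted before exp() to avoid overflow.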
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
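# Softmax, pass 2: normalize each exponentiated row by its sum.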
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
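# 4x4 transpose of the attention output: regroups the per-head results into a
# contiguous (seq_len, d_model) layout ahead of the output projection addmm.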
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
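# Averages the attention maps over the 4 heads (stride 16 steps across the
# head dimension); this is the head-averaged attn_weights tensor that
# nn.MultiheadAttention returns by default.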
@triton.jit
def triton_poi_fused_mean_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
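# LayerNorm statistics fused with the residual add: computes the per-row mean
# and (biased) variance of in_ptr0 + in_ptr1 over the 4-element feature dim.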
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
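# LayerNorm application fused with the residual add: recomputes x = a + b,
# then emits (x - mean) * rsqrt(var + 1e-05) * weight + bias.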
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
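# Fused bias-add + ReLU over the 4x2048 feed-forward activation of linear1.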
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # bounds mask is statically all-true (xnumel == 8192), so the stores below pass mask=None
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
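# Fused residual add: computes in_ptr0 + (in_out_ptr0 + in_ptr1) in place,
# i.e. the normalized input plus the bias-corrected linear2 output.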
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
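# Second LayerNorm statistics: per-row mean and rsqrt(var + 1e-05). Unlike
# the fused kernel 5 above, the inverse standard deviation is stored directly.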
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
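# Final LayerNorm application using the precomputed per-row mean (in_ptr1)
# and inverse std (in_ptr2), followed by the affine weight/bias transform.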
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
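# Driver for the compiled forward pass: dense matmuls are dispatched to
# cuBLAS through extern_kernels.mm/addmm/bmm, while all elementwise and
# reduction epilogues run in the fused Triton kernels defined above.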
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (2048, 4), (4, 1))
assert_size_stride(primals_10, (2048,), (1,))
assert_size_stride(primals_11, (4, 2048), (2048, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 4),
primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 8),
primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_6, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_6
buf10 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mean_4[grid(16)](buf6, buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf12 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(4)](primals_1, buf9,
buf11, buf12, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf9,
buf11, buf12, primals_7, primals_8, buf13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_8
buf14 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf13, reinterpret_tensor(primals_9, (4, 2048), (
1, 4), 0), out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_relu_7[grid(8192)](buf15, primals_10, 8192, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_10
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_11, (2048, 4),
(1, 2048), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_add_8[grid(16)](buf17, buf13, primals_12, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_12
buf18 = buf12
del buf12
buf19 = buf11
del buf11
triton_poi_fused_native_layer_norm_9[grid(4)](buf17, buf18, buf19,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(16)](buf17, buf18, buf19,
primals_13, primals_14, buf20, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf18
del buf19
del primals_14
return (buf20, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), primals_1,
primals_7, primals_13, primals_2, buf6, reinterpret_tensor(buf8, (4,
4), (4, 1), 0), buf9, buf13, buf15, buf17, primals_11, primals_9,
primals_5, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0))
class TransformerDecoderLayerNew(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super(TransformerDecoderLayerNew, self).__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout
=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = F.relu
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerDecoderLayerNew, self).__setstate__(state)
def forward(self, input_0, input_1):
primals_3 = self.multihead_attn.in_proj_weight
primals_4 = self.multihead_attn.in_proj_bias
primals_1 = self.multihead_attn.out_proj.weight
primals_6 = self.multihead_attn.out_proj.bias
primals_9 = self.linear1.weight
primals_10 = self.linear1.bias
primals_11 = self.linear2.weight
primals_7 = self.linear2.bias
primals_8 = self.norm2.weight
primals_12 = self.norm2.bias
primals_13 = self.norm3.weight
primals_14 = self.norm3.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
 | HumaticsLAB/GTM-Transformer | TransformerDecoderLayer | false | 17,414 | [ "MIT" ] | 7 | 94124d3246c7c22d8b952beeda53639a9ad170e3 | https://github.com/HumaticsLAB/GTM-Transformer/tree/94124d3246c7c22d8b952beeda53639a9ad170e3 |
InceptionC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/fb/cfbyh7temr2u5cam5txbekh4n77qsqs7bwxugnr6jx7oljt7yy43.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
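# (Layout prologue: repacks the NCHW input tensor into channels-last strides
# so the convolutions below can consume it directly.)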
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yp/cypqeovazc5e7kcfstjnu26e3wz4uoa4x23pkx6wonxeeezgobp4.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
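# (Layout prologue: repacks the 1x3 / 3x1 convolution weights into
# channels-last strides; reused for primals_6, primals_8, primals_14 and
# primals_16 below.)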
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 147456
xnumel = 3
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = (yindex // 384)
tmp0 = tl.load(in_ptr0 + (x2 + (3*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (384*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/bb/cbb73lcdpaj5nrqi26phbsbr4heojrhjiifea5ld4sfmfhvq757v.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 172032
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 448
y1 = (yindex // 448)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (448*x2) + (4032*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yf/cyf7qnp2vqod3rjdbwp75e4f42v5belppdfb4ooojdwzkjbv6nvc.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 24576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/d5/cd5bvyegooz7chfx7jfhl2pc2e2eymegv77vbf42zbfftasokq23.py
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_8 => convolution_4
# x_9 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28672
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 448
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xw/cxwhgwvbrvpa6wjpyfp6eblxakvghceg5b53d254dlqnqtq2rmf2.py
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# branch_pool => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_3, [3, 3], [1, 1], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_5 = async_compile.triton('triton_poi_fused_avg_pool2d_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 4
x1 = (xindex // 4) % 4
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-20) + x6), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-16) + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-12) + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-4) + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x6), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (4 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (12 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (16 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (20 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
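    # tmp52 is the divisor for this 3x3 window, written as the expanded
    # product of the clamped window extents along each spatial axis; for this
    # 4x4 input with padding 1 it evaluates to 9 at every position.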
tmp52 = 1 + ((-1)*x1) + ((-1)*x2) + (x1*x2) + (((5) * ((5) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (5)))*((5) * ((5) <= (2 + x2)) + (2 + x2) * ((2 + x2) < (5)))) + ((-1)*x1*((5) * ((5) <= (2 + x2)) + (2 + x2) * ((2 + x2) < (5)))) + ((-1)*x2*((5) * ((5) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (5)))) + ((5) * ((5) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (5))) + ((5) * ((5) <= (2 + x2)) + (2 + x2) * ((2 + x2) < (5)))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + (x6), tmp53, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3h/c3hjjoloaogewkk3ok2nvwzvf7zezxpfzfl3i4t6k2zexenqv7bb.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# outputs => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %cat, %cat_1, %relu_8], 1), kwargs = {})
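# (Fuses the channel concatenation of all four Inception-C branches with each
# branch's bias-add + ReLU epilogue, selecting the source tensor by channel
# range: 320 + 768 + 768 + 192 = 2048 output channels.)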
triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 2048
x0 = xindex % 16
x2 = (xindex // 32768)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 320, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((320*x0) + (5120*x2) + x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 1088, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = (-320) + x1
tmp17 = tmp16 >= tmp1
tmp18 = tl.full([1], 384, tl.int64)
tmp19 = tmp16 < tmp18
tmp20 = tmp19 & tmp15
tmp21 = tl.load(in_ptr2 + ((384*x0) + (6144*x2) + ((-320) + x1)), tmp20, eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + ((-320) + x1), tmp20, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp8, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp20, tmp24, tmp25)
tmp27 = tmp16 >= tmp18
tmp28 = tl.full([1], 768, tl.int64)
tmp29 = tmp16 < tmp28
tmp30 = tmp27 & tmp15
tmp31 = tl.load(in_ptr4 + ((384*x0) + (6144*x2) + ((-384) + ((-320) + x1))), tmp30, eviction_policy='evict_last', other=0.0)
tmp32 = tl.load(in_ptr5 + ((-384) + ((-320) + x1)), tmp30, eviction_policy='evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp8, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.where(tmp19, tmp26, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp15, tmp37, tmp38)
tmp40 = tmp0 >= tmp13
tmp41 = tl.full([1], 1856, tl.int64)
tmp42 = tmp0 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = (-1088) + x1
tmp45 = tmp44 >= tmp1
tmp46 = tmp44 < tmp18
tmp47 = tmp46 & tmp43
tmp48 = tl.load(in_ptr6 + ((384*x0) + (6144*x2) + ((-1088) + x1)), tmp47, eviction_policy='evict_last', other=0.0)
tmp49 = tl.load(in_ptr7 + ((-1088) + x1), tmp47, eviction_policy='evict_last', other=0.0)
tmp50 = tmp48 + tmp49
tmp51 = triton_helpers.maximum(tmp8, tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp47, tmp51, tmp52)
tmp54 = tmp44 >= tmp18
tmp55 = tmp44 < tmp28
tmp56 = tmp54 & tmp43
tmp57 = tl.load(in_ptr8 + ((384*x0) + (6144*x2) + ((-384) + ((-1088) + x1))), tmp56, eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr9 + ((-384) + ((-1088) + x1)), tmp56, eviction_policy='evict_last', other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = triton_helpers.maximum(tmp8, tmp59)
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp56, tmp60, tmp61)
tmp63 = tl.where(tmp46, tmp53, tmp62)
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp43, tmp63, tmp64)
tmp66 = tmp0 >= tmp41
tmp67 = tl.full([1], 2048, tl.int64)
tmp68 = tmp0 < tmp67
tmp69 = tl.load(in_ptr10 + ((192*x0) + (3072*x2) + ((-1856) + x1)), tmp66, eviction_policy='evict_last', other=0.0)
tmp70 = tl.load(in_ptr11 + ((-1856) + x1), tmp66, eviction_policy='evict_last', other=0.0)
tmp71 = tmp69 + tmp70
tmp72 = triton_helpers.maximum(tmp8, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp66, tmp72, tmp73)
tmp75 = tl.where(tmp43, tmp65, tmp74)
tmp76 = tl.where(tmp15, tmp39, tmp75)
tmp77 = tl.where(tmp4, tmp11, tmp76)
tl.store(out_ptr0 + (x3), tmp77, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ba/cbaxn5xy7wgygesmcvxm27s6m7uvy5hhopp7k5brejvh5hggp7gi.py
# Topologically Sorted Source Nodes: [x_16, x_17], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_16 => convolution_8
# x_17 => relu_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_8, 0), kwargs = {})
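# (The *_threshold_backward kernels recompute the bias-add + ReLU and store
# only the boolean mask relu(x) <= 0, which autograd later uses to zero the
# incoming gradients.)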
triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pt/cpt2o2kp3dfpteg3nwjqjokl65jyudihdknvmlafmtlgo5zjqvqt.py
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_14 => convolution_7
# x_15 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_16, %primals_17, [1, 1], [1, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 24576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/e7/ce7fg6ckgadcdykwhiu5a46fkvrvgetssodsqjkijlbluprpwuf3.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_8 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
args.clear()
assert_size_stride(primals_1, (320, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (320, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (384, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (384, ), (1, ))
assert_size_stride(primals_6, (384, 384, 1, 3), (1152, 3, 3, 1))
assert_size_stride(primals_7, (384, ), (1, ))
assert_size_stride(primals_8, (384, 384, 3, 1), (1152, 3, 1, 1))
assert_size_stride(primals_9, (384, ), (1, ))
assert_size_stride(primals_10, (448, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (448, ), (1, ))
assert_size_stride(primals_12, (384, 448, 3, 3), (4032, 9, 3, 1))
assert_size_stride(primals_13, (384, ), (1, ))
assert_size_stride(primals_14, (384, 384, 1, 3), (1152, 3, 3, 1))
assert_size_stride(primals_15, (384, ), (1, ))
assert_size_stride(primals_16, (384, 384, 3, 1), (1152, 3, 1, 1))
assert_size_stride(primals_17, (384, ), (1, ))
assert_size_stride(primals_18, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_19, (192, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((384, 384, 1, 3), (1152, 1, 1152, 384), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 147456, 3, grid=grid(147456, 3), stream=stream0)
del primals_6
buf2 = empty_strided_cuda((384, 384, 3, 1), (1152, 1, 384, 384), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_8, buf2, 147456, 3, grid=grid(147456, 3), stream=stream0)
del primals_8
buf3 = empty_strided_cuda((384, 448, 3, 3), (4032, 1, 1344, 448), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_12, buf3, 172032, 9, grid=grid(172032, 9), stream=stream0)
del primals_12
buf4 = empty_strided_cuda((384, 384, 1, 3), (1152, 1, 1152, 384), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_14, buf4, 147456, 3, grid=grid(147456, 3), stream=stream0)
del primals_14
buf5 = empty_strided_cuda((384, 384, 3, 1), (1152, 1, 384, 384), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_16, buf5, 147456, 3, grid=grid(147456, 3), stream=stream0)
del primals_16
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 320, 4, 4), (5120, 1, 1280, 320))
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 384, 4, 4), (6144, 1, 1536, 384))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf8, primals_5, 24576, grid=grid(24576), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, buf1, stride=(1, 1), padding=(0, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 384, 4, 4), (6144, 1, 1536, 384))
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 384, 4, 4), (6144, 1, 1536, 384))
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf0, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 448, 4, 4), (7168, 1, 1792, 448))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf12, primals_11, 28672, grid=grid(28672), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 384, 4, 4), (6144, 1, 1536, 384))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf14, primals_13, 24576, grid=grid(24576), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, buf4, stride=(1, 1), padding=(0, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 384, 4, 4), (6144, 1, 1536, 384))
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf14, buf5, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 384, 4, 4), (6144, 1, 1536, 384))
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Topologically Sorted Source Nodes: [branch_pool], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_5.run(buf0, buf17, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 192, 4, 4), (3072, 1, 768, 192))
buf19 = empty_strided_cuda((4, 2048, 4, 4), (32768, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(buf6, primals_2, buf9, primals_7, buf10, primals_9, buf15, primals_15, buf16, primals_17, buf18, primals_19, buf19, 131072, grid=grid(131072), stream=stream0)
buf20 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192), torch.bool)
# Topologically Sorted Source Nodes: [x_16, x_17], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf18, primals_19, buf20, 12288, grid=grid(12288), stream=stream0)
del buf18
del primals_19
buf21 = empty_strided_cuda((4, 384, 4, 4), (6144, 1, 1536, 384), torch.bool)
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf16, primals_17, buf21, 24576, grid=grid(24576), stream=stream0)
del buf16
del primals_17
buf22 = empty_strided_cuda((4, 384, 4, 4), (6144, 1, 1536, 384), torch.bool)
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf15, primals_15, buf22, 24576, grid=grid(24576), stream=stream0)
del buf15
del primals_15
buf23 = empty_strided_cuda((4, 384, 4, 4), (6144, 1, 1536, 384), torch.bool)
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf10, primals_9, buf23, 24576, grid=grid(24576), stream=stream0)
del buf10
del primals_9
buf24 = empty_strided_cuda((4, 384, 4, 4), (6144, 1, 1536, 384), torch.bool)
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf9, primals_7, buf24, 24576, grid=grid(24576), stream=stream0)
del buf9
del primals_7
buf25 = empty_strided_cuda((4, 320, 4, 4), (5120, 1, 1280, 320), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_9.run(buf6, primals_2, buf25, 20480, grid=grid(20480), stream=stream0)
del buf6
del primals_2
return (buf19, primals_1, buf0, primals_4, buf1, buf2, primals_10, buf3, buf4, buf5, primals_18, buf8, buf12, buf14, buf17, buf20, buf21, buf22, buf23, buf24, buf25, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((320, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((384, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((384, 384, 1, 3), (1152, 3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((384, 384, 3, 1), (1152, 3, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((448, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((448, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((384, 448, 3, 3), (4032, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((384, 384, 1, 3), (1152, 3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((384, 384, 3, 1), (1152, 3, 1, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((192, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
import torch.nn as nn
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class InceptionC(nn.Module):
def __init__(self, in_channels):
super(InceptionC, self).__init__()
self.branch1x1 = Conv2d(in_channels, 320, 1)
self.branch3x3_1 = Conv2d(in_channels, 384, 1)
self.branch3x3_2a = Conv2d(384, 384, (1, 3), padding=(0, 1))
self.branch3x3_2b = Conv2d(384, 384, (3, 1), padding=(1, 0))
self.branch3x3dbl_1 = Conv2d(in_channels, 448, 1)
self.branch3x3dbl_2 = Conv2d(448, 384, 3, padding=1)
self.branch3x3dbl_3a = Conv2d(384, 384, (1, 3), padding=(0, 1))
self.branch3x3dbl_3b = Conv2d(384, 384, (3, 1), padding=(1, 0))
self.branch_pool = Conv2d(in_channels, 192, 1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.
branch3x3dbl_3b(branch3x3dbl)]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
branches = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
outputs = torch.cat(branches, 1)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
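    # copy the (4, 4, 4, 4) NCHW input into the channels-last layout used by the convolutions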
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 3
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = yindex // 384
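    # repack the 384x384 (1, 3) / (3, 1) conv weights into channels-last layout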
tmp0 = tl.load(in_ptr0 + (x2 + 3 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 384 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 448
y1 = yindex // 448
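    # repack the 384x448 3x3 conv weights into channels-last layout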
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 448 * x2 + 4032 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
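    # in-place bias-add + ReLU on a 384-channel conv output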
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 448
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 4
x1 = xindex // 4 % 4
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-20 + x6), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-16 + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-12 + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-4 + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (4 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (12 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (16 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (20 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
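    # divisor below equals (min(x2 + 2, 5) - x2 + 1) * (min(x1 + 2, 5) - x1 + 1):
    # the 3x3 window area clamped at the bottom/right edge, matching
    # F.avg_pool2d's default count_include_pad=True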
tmp52 = 1 + -1 * x1 + -1 * x2 + x1 * x2 + (5 * (5 <= 2 + x1) + (2 + x1) *
(2 + x1 < 5)) * (5 * (5 <= 2 + x2) + (2 + x2) * (2 + x2 < 5)
) + -1 * x1 * (5 * (5 <= 2 + x2) + (2 + x2) * (2 + x2 < 5)
) + -1 * x2 * (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5)) + (5 *
(5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5)) + (5 * (5 <= 2 + x2) + (2 +
x2) * (2 + x2 < 5))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x6, tmp53, xmask)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 2048
x0 = xindex % 16
x2 = xindex // 32768
x3 = xindex
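    # x1 indexes the 2048 output channels: [0, 320) branch1x1, [320, 1088)
    # branch3x3 (2 x 384), [1088, 1856) branch3x3dbl (2 x 384), [1856, 2048)
    # branch_pool; each branch gets its bias-add + ReLU fused in before the concat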
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 320, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (320 * x0 + 5120 * x2 + x1), tmp4,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 1088, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = -320 + x1
tmp18 = tl.full([1], 384, tl.int64)
tmp19 = tmp16 < tmp18
tmp20 = tmp19 & tmp15
tmp21 = tl.load(in_ptr2 + (384 * x0 + 6144 * x2 + (-320 + x1)), tmp20,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + (-320 + x1), tmp20, eviction_policy=
'evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp8, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp20, tmp24, tmp25)
tmp27 = tmp16 >= tmp18
tl.full([1], 768, tl.int64)
tmp30 = tmp27 & tmp15
tmp31 = tl.load(in_ptr4 + (384 * x0 + 6144 * x2 + (-384 + (-320 + x1))),
tmp30, eviction_policy='evict_last', other=0.0)
tmp32 = tl.load(in_ptr5 + (-384 + (-320 + x1)), tmp30, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp8, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.where(tmp19, tmp26, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp15, tmp37, tmp38)
tmp40 = tmp0 >= tmp13
tmp41 = tl.full([1], 1856, tl.int64)
tmp42 = tmp0 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = -1088 + x1
tmp46 = tmp44 < tmp18
tmp47 = tmp46 & tmp43
tmp48 = tl.load(in_ptr6 + (384 * x0 + 6144 * x2 + (-1088 + x1)), tmp47,
eviction_policy='evict_last', other=0.0)
tmp49 = tl.load(in_ptr7 + (-1088 + x1), tmp47, eviction_policy=
'evict_last', other=0.0)
tmp50 = tmp48 + tmp49
tmp51 = triton_helpers.maximum(tmp8, tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp47, tmp51, tmp52)
tmp54 = tmp44 >= tmp18
tmp56 = tmp54 & tmp43
tmp57 = tl.load(in_ptr8 + (384 * x0 + 6144 * x2 + (-384 + (-1088 + x1))
), tmp56, eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr9 + (-384 + (-1088 + x1)), tmp56, eviction_policy
='evict_last', other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = triton_helpers.maximum(tmp8, tmp59)
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp56, tmp60, tmp61)
tmp63 = tl.where(tmp46, tmp53, tmp62)
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp43, tmp63, tmp64)
tmp66 = tmp0 >= tmp41
tl.full([1], 2048, tl.int64)
tmp69 = tl.load(in_ptr10 + (192 * x0 + 3072 * x2 + (-1856 + x1)), tmp66,
eviction_policy='evict_last', other=0.0)
tmp70 = tl.load(in_ptr11 + (-1856 + x1), tmp66, eviction_policy=
'evict_last', other=0.0)
tmp71 = tmp69 + tmp70
tmp72 = triton_helpers.maximum(tmp8, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp66, tmp72, tmp73)
tmp75 = tl.where(tmp43, tmp65, tmp74)
tmp76 = tl.where(tmp15, tmp39, tmp75)
tmp77 = tl.where(tmp4, tmp11, tmp76)
tl.store(out_ptr0 + x3, tmp77, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
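    # kernels _7/_8/_9 are identical up to channel count: bias-add + ReLU, then
    # store the boolean (<= 0) mask saved for the backward pass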
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (320, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (320,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (384, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (384,), (1,))
assert_size_stride(primals_6, (384, 384, 1, 3), (1152, 3, 3, 1))
assert_size_stride(primals_7, (384,), (1,))
assert_size_stride(primals_8, (384, 384, 3, 1), (1152, 3, 1, 1))
assert_size_stride(primals_9, (384,), (1,))
assert_size_stride(primals_10, (448, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (448,), (1,))
assert_size_stride(primals_12, (384, 448, 3, 3), (4032, 9, 3, 1))
assert_size_stride(primals_13, (384,), (1,))
assert_size_stride(primals_14, (384, 384, 1, 3), (1152, 3, 3, 1))
assert_size_stride(primals_15, (384,), (1,))
assert_size_stride(primals_16, (384, 384, 3, 1), (1152, 3, 1, 1))
assert_size_stride(primals_17, (384,), (1,))
assert_size_stride(primals_18, (192, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_19, (192,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
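        # buf0 is the input and buf1..buf5 are conv weights, all copied to
        # channels-last before the extern convolution calls below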
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((384, 384, 1, 3), (1152, 1, 1152, 384),
torch.float32)
triton_poi_fused_1[grid(147456, 3)](primals_6, buf1, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((384, 384, 3, 1), (1152, 1, 384, 384),
torch.float32)
triton_poi_fused_1[grid(147456, 3)](primals_8, buf2, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((384, 448, 3, 3), (4032, 1, 1344, 448),
torch.float32)
triton_poi_fused_2[grid(172032, 9)](primals_12, buf3, 172032, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf4 = empty_strided_cuda((384, 384, 1, 3), (1152, 1, 1152, 384),
torch.float32)
triton_poi_fused_1[grid(147456, 3)](primals_14, buf4, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_14
buf5 = empty_strided_cuda((384, 384, 3, 1), (1152, 1, 384, 384),
torch.float32)
triton_poi_fused_1[grid(147456, 3)](primals_16, buf5, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_16
buf6 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 320, 4, 4), (5120, 1, 1280, 320))
buf7 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 384, 4, 4), (6144, 1, 1536, 384))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_3[grid(24576)](buf8, primals_5,
24576, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf9 = extern_kernels.convolution(buf8, buf1, stride=(1, 1),
padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 384, 4, 4), (6144, 1, 1536, 384))
buf10 = extern_kernels.convolution(buf8, buf2, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 384, 4, 4), (6144, 1, 1536, 384))
buf11 = extern_kernels.convolution(buf0, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 448, 4, 4), (7168, 1, 1792, 448))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_4[grid(28672)](buf12, primals_11,
28672, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf13 = extern_kernels.convolution(buf12, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 384, 4, 4), (6144, 1, 1536, 384))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_3[grid(24576)](buf14, primals_13,
24576, XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf15 = extern_kernels.convolution(buf14, buf4, stride=(1, 1),
padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 384, 4, 4), (6144, 1, 1536, 384))
buf16 = extern_kernels.convolution(buf14, buf5, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 384, 4, 4), (6144, 1, 1536, 384))
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_avg_pool2d_5[grid(256)](buf0, buf17, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 192, 4, 4), (3072, 1, 768, 192))
buf19 = empty_strided_cuda((4, 2048, 4, 4), (32768, 16, 4, 1),
torch.float32)
triton_poi_fused_cat_6[grid(131072)](buf6, primals_2, buf9,
primals_7, buf10, primals_9, buf15, primals_15, buf16,
primals_17, buf18, primals_19, buf19, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf20 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(12288)](
buf18, primals_19, buf20, 12288, XBLOCK=256, num_warps=4,
num_stages=1)
del buf18
del primals_19
buf21 = empty_strided_cuda((4, 384, 4, 4), (6144, 1, 1536, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(24576)](
buf16, primals_17, buf21, 24576, XBLOCK=128, num_warps=4,
num_stages=1)
del buf16
del primals_17
buf22 = empty_strided_cuda((4, 384, 4, 4), (6144, 1, 1536, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(24576)](
buf15, primals_15, buf22, 24576, XBLOCK=128, num_warps=4,
num_stages=1)
del buf15
del primals_15
buf23 = empty_strided_cuda((4, 384, 4, 4), (6144, 1, 1536, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(24576)](
buf10, primals_9, buf23, 24576, XBLOCK=128, num_warps=4,
num_stages=1)
del buf10
del primals_9
buf24 = empty_strided_cuda((4, 384, 4, 4), (6144, 1, 1536, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(24576)](
buf9, primals_7, buf24, 24576, XBLOCK=128, num_warps=4,
num_stages=1)
del buf9
del primals_7
buf25 = empty_strided_cuda((4, 320, 4, 4), (5120, 1, 1280, 320),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(20480)](
buf6, primals_2, buf25, 20480, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_2
return (buf19, primals_1, buf0, primals_4, buf1, buf2, primals_10, buf3,
buf4, buf5, primals_18, buf8, buf12, buf14, buf17, buf20, buf21,
buf22, buf23, buf24, buf25)
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, batch_norm=
False, **kwargs):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.batch_norm = batch_norm
if self.batch_norm:
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
x = self.relu(x)
return x
class InceptionCNew(nn.Module):
def __init__(self, in_channels):
super(InceptionCNew, self).__init__()
self.branch1x1 = Conv2d(in_channels, 320, 1)
self.branch3x3_1 = Conv2d(in_channels, 384, 1)
self.branch3x3_2a = Conv2d(384, 384, (1, 3), padding=(0, 1))
self.branch3x3_2b = Conv2d(384, 384, (3, 1), padding=(1, 0))
self.branch3x3dbl_1 = Conv2d(in_channels, 448, 1)
self.branch3x3dbl_2 = Conv2d(448, 384, 3, padding=1)
self.branch3x3dbl_3a = Conv2d(384, 384, (1, 3), padding=(0, 1))
self.branch3x3dbl_3b = Conv2d(384, 384, (3, 1), padding=(1, 0))
self.branch_pool = Conv2d(in_channels, 192, 1)
def forward(self, input_0):
primals_1 = self.branch1x1.conv.weight
primals_2 = self.branch1x1.conv.bias
primals_4 = self.branch3x3_1.conv.weight
primals_5 = self.branch3x3_1.conv.bias
primals_6 = self.branch3x3_2a.conv.weight
primals_7 = self.branch3x3_2a.conv.bias
primals_8 = self.branch3x3_2b.conv.weight
primals_9 = self.branch3x3_2b.conv.bias
primals_10 = self.branch3x3dbl_1.conv.weight
primals_11 = self.branch3x3dbl_1.conv.bias
primals_12 = self.branch3x3dbl_2.conv.weight
primals_13 = self.branch3x3dbl_2.conv.bias
primals_14 = self.branch3x3dbl_3a.conv.weight
primals_15 = self.branch3x3dbl_3a.conv.bias
primals_16 = self.branch3x3dbl_3b.conv.weight
primals_17 = self.branch3x3dbl_3b.conv.bias
primals_18 = self.branch_pool.conv.weight
primals_19 = self.branch_pool.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0]
| Hiroaki-Ozaki/modelib-classification | InceptionC | false | 17,415 | ["WTFPL"] | 10 | 11077704cc0bc9a42fc4b94da60b57d31ff0f65c | https://github.com/Hiroaki-Ozaki/modelib-classification/tree/11077704cc0bc9a42fc4b94da60b57d31ff0f65c |
PositionalEncoding | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ym/cymuzugshqgqtcherziy77sggebax4tlu25ndrviqb23y6eerjaq.py
# Topologically Sorted Source Nodes: [arange_1, mul, div_term, mul_2, cos, setitem_1], Original ATen: [aten.arange, aten.mul, aten.exp, aten.cos, aten.copy]
# Source node to ATen node mapping:
# arange_1 => iota_1
# cos => cos
# div_term => exp
# mul => mul
# mul_2 => mul_2
# setitem_1 => copy_1
# Graph fragment:
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (2,), kwargs = {start: 0, step: 2, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_1, -2.302585092994046), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %exp), kwargs = {})
# %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%mul_2,), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_9, %cos), kwargs = {})
# %slice_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%select_int_1, %copy_1, 1, 1, 9223372036854775807, 2), kwargs = {})
triton_poi_fused_arange_copy_cos_exp_mul_0 = async_compile.triton('triton_poi_fused_arange_copy_cos_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_copy_cos_exp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_copy_cos_exp_mul_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = ((-1) + x0) % 2
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 == tmp4
tmp6 = tmp2 & tmp5
tmp7 = 2*(triton_helpers.div_floor_integer((-1) + x0, 2))
tmp8 = tmp7.to(tl.float32)
tmp9 = -2.302585092994046
tmp10 = tmp8 * tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = x1
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp13 * tmp11
tmp15 = tl_math.cos(tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp6, tmp15, tmp16)
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = tmp18 == tmp18
tmp20 = x2 % 2
tmp21 = tmp20 == tmp4
tmp22 = 2*(x0 // 2)
tmp23 = tmp22.to(tl.float32)
tmp24 = tmp23 * tmp9
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp13 * tmp25
tmp27 = tl_math.sin(tmp26)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp21, tmp27, tmp28)
tmp30 = 0.0
tmp31 = tl.where(tmp21, tmp29, tmp30)
tmp32 = tl.where(tmp19, tmp31, tmp30)
tmp33 = tl.where(tmp6, tmp17, tmp32)
tl.store(out_ptr0 + (x2), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hl/chlfyil5dyz6qk6bluuvrnnqgrfk4f7xhy75pam2xi5xoj2l3ce4.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %slice_13], -1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = (xindex // 8)
x1 = (xindex // 8) % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = tmp9 == tmp9
tmp11 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = ((-4) + x0) % 2
tmp13 = tmp12 == tmp1
tmp14 = tmp13 & tmp6
tmp15 = 2*(triton_helpers.div_floor_integer((-4) + x0, 2))
tmp16 = tmp15.to(tl.float32)
tmp17 = -2.302585092994046
tmp18 = tmp16 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = x1
tmp21 = tmp20.to(tl.float32)
tmp22 = tmp21 * tmp19
tmp23 = tl_math.sin(tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = 0.0
tmp27 = tl.where(tmp13, tmp25, tmp26)
tmp28 = tl.where(tmp10, tmp27, tmp26)
tmp29 = tl.where(tmp10, tmp11, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp6, tmp29, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + (x4), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [arange_1, mul, div_term, mul_2, cos, setitem_1], Original ATen: [aten.arange, aten.mul, aten.exp, aten.cos, aten.copy]
stream0 = get_raw_stream(0)
triton_poi_fused_arange_copy_cos_exp_mul_0.run(buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(arg0_1, buf0, buf1, 128, grid=grid(128), stream=stream0)
del arg0_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
class PositionalEncoding(nn.Module):
def __init__(self, dimension: 'int', dropout: 'float'=0.1):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.dimension = dimension
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
position = torch.arange(x.shape[1]).unsqueeze(1)
div_term = torch.exp(torch.arange(0, self.dimension, 2) * (-math.
log(10000.0) / self.dimension))
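        # div_term[k] = 10000**(-2k / dimension): the frequency of the k-th sin/cos pair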
pe: 'torch.Tensor' = torch.zeros(1, x.shape[1], self.dimension).to(x
.device)
pe[0, :, 0::2] = torch.sin(position * div_term)
pe[0, :, 1::2] = torch.cos(position * div_term)
pe = torch.repeat_interleave(pe, x.shape[0], 0)
x = torch.cat((x, pe[:x.shape[0]]), dim=-1)
return self.dropout(x)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dimension': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_arange_copy_cos_exp_mul_0(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
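    # build the (seq_len=4, dim=4) sinusoidal table: sin at even dims, cos at odd
    # dims; -2.302585092994046 == -ln(10000) / dimension for dimension=4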
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = (-1 + x0) % 2
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 == tmp4
tmp6 = tmp2 & tmp5
tmp7 = 2 * triton_helpers.div_floor_integer(-1 + x0, 2)
tmp8 = tmp7.to(tl.float32)
tmp9 = -2.302585092994046
tmp10 = tmp8 * tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = x1
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp13 * tmp11
tmp15 = tl_math.cos(tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp6, tmp15, tmp16)
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = tmp18 == tmp18
tmp20 = x2 % 2
tmp21 = tmp20 == tmp4
tmp22 = 2 * (x0 // 2)
tmp23 = tmp22.to(tl.float32)
tmp24 = tmp23 * tmp9
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp13 * tmp25
tmp27 = tl_math.sin(tmp26)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp21, tmp27, tmp28)
tmp30 = 0.0
tmp31 = tl.where(tmp21, tmp29, tmp30)
tmp32 = tl.where(tmp19, tmp31, tmp30)
tmp33 = tl.where(tmp6, tmp17, tmp32)
tl.store(out_ptr0 + x2, tmp33, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = xindex // 8
x1 = xindex // 8 % 4
x4 = xindex
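    # channels [0, 4) copy the input features; channels [4, 8) append the
    # positional-encoding table produced by the kernel above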
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = tmp9 == tmp9
tmp11 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = (-4 + x0) % 2
tmp13 = tmp12 == tmp1
tmp14 = tmp13 & tmp6
tmp15 = 2 * triton_helpers.div_floor_integer(-4 + x0, 2)
tmp16 = tmp15.to(tl.float32)
tmp17 = -2.302585092994046
tmp18 = tmp16 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = x1
tmp21 = tmp20.to(tl.float32)
tmp22 = tmp21 * tmp19
tmp23 = tl_math.sin(tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = 0.0
tmp27 = tl.where(tmp13, tmp25, tmp26)
tmp28 = tl.where(tmp10, tmp27, tmp26)
tmp29 = tl.where(tmp10, tmp11, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp6, tmp29, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + x4, tmp32, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_arange_copy_cos_exp_mul_0[grid(16)](buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_1[grid(128)](arg0_1, buf0, buf1, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
del buf0
return buf1,
class PositionalEncodingNew(nn.Module):
def __init__(self, dimension: 'int', dropout: 'float'=0.1):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.dimension = dimension
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| IMDxD/NonAttentiveTacotron | PositionalEncoding | false | 17,416 | ["MIT"] | 4 | a227fba1bdfa4c5ec63a0f0364313f3ac0fef1ba | https://github.com/IMDxD/NonAttentiveTacotron/tree/a227fba1bdfa4c5ec63a0f0364313f3ac0fef1ba |
Conv_ReLU_Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/bc/cbcelejo5k5s7lrehccyyi5xnpfemn57rdfe3lvzmwm4kjhpdjfh.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, None)
tl.store(out_ptr0 + (x0), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (4, 64, 64, 64), (262144, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, buf2, 1048576, grid=grid(1048576), stream=stream0)
return (buf1, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Conv_ReLU_Block(nn.Module):
def __init__(self):
super(Conv_ReLU_Block, self).__init__()
self.conv = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=
3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu(self.conv(x))
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
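    # fused ReLU: write max(0, x) back in place and store the (<= 0) mask for backward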
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, None)
tl.store(out_ptr0 + x0, tmp4, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (4, 64, 64, 64), (262144, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1048576)](buf1,
buf2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf2
class Conv_ReLU_BlockNew(nn.Module):
def __init__(self):
super(Conv_ReLU_BlockNew, self).__init__()
self.conv = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=
3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| HwangToeMat/Asym_VDSR | Conv_ReLU_Block | false | 17,417 | ["MIT"] | 4 | 598200f745434fc6e1bb46b6da7d6cf7b0fdaa50 | https://github.com/HwangToeMat/Asym_VDSR/tree/598200f745434fc6e1bb46b6da7d6cf7b0fdaa50 |
AlexNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/xq/cxqxvwhuevcb5oe7gsgfej3rmce7mdc6vqg3mkviccgugis2c7ro.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
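    # repack the 64 x 3 x 3 x 3 first-conv weights into channels-last layout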
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/az/cazvf33aclbntgyixs3zlm6bdzs672xtry2xl6pc3l3sjzwrnks5.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3p/c3pyndcjwis6gjg54k5ra7p6lgxz6oz2dfefgnwt7eugffzxcdpu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/un/cunmh363rd5jbcg72a7ksvl5yj5nyb74pjmsyfyvur22ujnyjfb5.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 98304
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/gz/cgzxu5ewojxmsm7ahtgrhtuj5cfvco7nkmszn6yduklg2a35of2c.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 98304
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = (yindex // 384)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (384*x2) + (3456*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wu/cwu76v5k3qykuedq2kwod6ewkatwicgnt222bneru5mrfolaws3b.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
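# Hedged note: triton_poi_fused_0 through triton_poi_fused_5 above are pure
# layout kernels. They copy the convolution weights and the input activation
# from contiguous NCHW strides into a channels-last layout (e.g. the
# (64, 3, 3, 3) weight goes from strides (27, 9, 3, 1) to (27, 1, 9, 3)) so
# the convolutions below can run on channels-last tensors. A minimal
# eager-mode analogue, purely for illustration (the helper name is ours):
def _reference_to_channels_last(t):
    import torch
    # Equivalent repacking of a 4D tensor; the generated kernels perform the
    # same copy with hand-rolled index arithmetic.
    return t.contiguous(memory_format=torch.channels_last)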
# kernel path: runs/run_shard_2/inductor_cache/id/cidmzgucnkl272rhcrgigxqk6npm55s7rlodnlh7kxnaz3tcg673.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
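# Hedged note: the convolution itself is executed by extern_kernels.convolution
# with bias=None; this kernel fuses only the epilogue, adding the per-channel
# bias and applying ReLU in place. An illustrative eager-mode analogue (the
# helper name is ours, not part of the generated graph):
def _reference_bias_relu_epilogue(conv_out, bias):
    import torch
    # conv_out: (N, C, H, W) convolution output; bias: (C,) per-channel bias
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))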
# kernel path: runs/run_shard_2/inductor_cache/bz/cbzuxuby6o53ruj4kx2qtqfaplmp5puwrlzygum5is3egen4qcpp.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = (xindex // 64) % 32
x2 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x2)), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x2)), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x2)), None)
tmp5 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
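# Hedged note: this kernel is a 2x2, stride-2 max pool over a channels-last
# activation. It emits the pooled value plus a compact int8 code (0..3) naming
# which of the four window taps won; the eager analogue below instead returns
# flat input indices from return_indices=True. Illustrative only:
def _reference_maxpool_2x2(x):
    import torch.nn.functional as F
    values, indices = F.max_pool2d(x, kernel_size=2, stride=2,
                                   return_indices=True)
    return values, indices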
# kernel path: runs/run_shard_2/inductor_cache/7r/c7rjkmkxzokchpn3ljeveyas2xnbgo26qmmlxx4t6ruz22koxpcn.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/py/cpyilkg353nbf5vmvprqoet7v5souw25ik66r4k342opnk5ewxqk.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 16
x2 = (xindex // 4096)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (16384*x2)), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (16384*x2)), None)
tmp3 = tl.load(in_ptr0 + (8192 + x0 + (512*x1) + (16384*x2)), None)
tmp5 = tl.load(in_ptr0 + (8448 + x0 + (512*x1) + (16384*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3w/c3weqstk4khc5p23x2otbu4sgekxhmklnausod2inaocqk2rvwf6.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_6 => convolution_2
# x_7 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/tj/ctj5jku4z47dqhoyougdad2dlf55v5exhtri7ukplxvzunwsfqdp.py
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_8 => convolution_3
# x_9 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pa/cpaq3a2vdhguo56vdfkhs52fcutksph2ckteyvvu3hakhzt5n3gg.py
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_12 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 8
x2 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x2)), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x2)), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x2)), None)
tmp5 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vs/cvs5k5y2koyg27kof63qcix2acefkvqb43splrt7suxay2f3zwpv.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten._adaptive_avg_pool2d]
# Source node to ATen node mapping:
# x_13 => _adaptive_avg_pool2d
# Graph fragment:
# %_adaptive_avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%getitem_4, [4, 4]), kwargs = {})
triton_poi_fused__adaptive_avg_pool2d_13 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 256], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_13(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 4
y4 = (yindex // 4)
y2 = (yindex // 16)
y5 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x3 + (512*y0) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x3 + (512*y0) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2048 + x3 + (512*y0) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2304 + x3 + (512*y0) + (4096*y4)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (y5 + (16*x3) + (4096*y2)), tmp8, xmask & ymask)
''', device_str='cuda')
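# Hedged note: with an 8x8 spatial input and a (4, 4) target, adaptive average
# pooling reduces to averaging disjoint 2x2 blocks, which is why this kernel
# sums four taps and multiplies by 0.25. Illustrative eager analogue:
def _reference_adaptive_avgpool_4x4(x):
    import torch.nn.functional as F
    return F.adaptive_avg_pool2d(x, (4, 4))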
# kernel path: runs/run_shard_2/inductor_cache/d4/cd44apm4p6qqcarmomnugqamwm5gcgpel5ikcumgov2ygpvns5j7.py
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_17 => relu_5
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_13), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_14 = async_compile.triton('triton_poi_fused_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
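# Hedged note: the fully connected layers run their matmuls through
# extern_kernels.mm against the transposed weight; this kernel fuses only the
# bias add and ReLU that follow. Illustrative eager analogue (helper name is
# ours):
def _reference_linear_relu(x, weight, bias):
    import torch
    # Matches nn.Linear followed by ReLU: x @ W^T + b, clamped at zero.
    return torch.relu(x @ weight.t() + bias)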
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (256, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (384, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (384, ), (1, ))
assert_size_stride(primals_8, (256, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (4096, 4096), (4096, 1))
assert_size_stride(primals_13, (4096, ), (1, ))
assert_size_stride(primals_14, (4096, 4096), (4096, 1))
assert_size_stride(primals_15, (4096, ), (1, ))
assert_size_stride(primals_16, (10, 4096), (4096, 1))
assert_size_stride(primals_17, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((256, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 16384, 25, grid=grid(16384, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((384, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 98304, 9, grid=grid(98304, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 384, 3, 3), (3456, 1, 1152, 384), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 98304, 9, grid=grid(98304, 9), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_10, buf5, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_10
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf7, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32)
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf7, buf8, buf9, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf11, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
buf12 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
buf13 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_9.run(buf11, buf12, buf13, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf12, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 384, 16, 16), (98304, 1, 6144, 384))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf15, primals_7, 393216, grid=grid(393216), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf17, primals_9, 262144, grid=grid(262144), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf19, primals_11, 262144, grid=grid(262144), stream=stream0)
del primals_11
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf19, buf20, buf21, 65536, grid=grid(65536), stream=stream0)
buf22 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten._adaptive_avg_pool2d]
triton_poi_fused__adaptive_avg_pool2d_13.run(buf20, buf22, 64, 256, grid=grid(64, 256), stream=stream0)
buf23 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf22, (4, 4096), (4096, 1), 0), reinterpret_tensor(primals_12, (4096, 4096), (1, 4096), 0), out=buf23)
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.relu]
triton_poi_fused_relu_14.run(buf24, primals_13, 16384, grid=grid(16384), stream=stream0)
del primals_13
buf25 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf24, reinterpret_tensor(primals_14, (4096, 4096), (1, 4096), 0), out=buf25)
buf26 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.relu]
triton_poi_fused_relu_14.run(buf26, primals_15, 16384, grid=grid(16384), stream=stream0)
del primals_15
buf27 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_17, buf26, reinterpret_tensor(primals_16, (4096, 10), (1, 4096), 0), alpha=1, beta=1, out=buf27)
del primals_17
return (buf27, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf8, buf9, buf11, buf12, buf13, buf15, buf17, buf19, buf20, buf21, reinterpret_tensor(buf22, (4, 4096), (4096, 1), 0), buf24, buf26, primals_16, primals_14, primals_12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((384, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 384, 3, 3), (3456, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4096, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4096, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((10, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)

# Reference PyTorch module that the compiled graph above was traced from.
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F
class AlexNet(nn.Module):
def __init__(self, num_classes=10, out_ch_conv1=64, out_ch_conv2=256,
out_ch_conv3=384, out_ch_conv4=256, out_ch_conv5=256, out_ch_fc1=
4096, out_ch_fc2=4096):
super(AlexNet, self).__init__()
self.conv1 = nn.Conv2d(3, out_ch_conv1, kernel_size=3, stride=1,
padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(out_ch_conv1, out_ch_conv2, kernel_size=5,
padding=2)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(out_ch_conv2, out_ch_conv3, kernel_size=3,
padding=1)
self.conv4 = nn.Conv2d(out_ch_conv3, out_ch_conv4, kernel_size=3,
padding=1)
self.conv5 = nn.Conv2d(out_ch_conv4, out_ch_conv5, kernel_size=3,
padding=1)
self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((4, 4))
self.drop1 = nn.Dropout()
self.fc1 = nn.Linear(out_ch_conv5 * 4 * 4, out_ch_fc1)
self.drop2 = nn.Dropout()
self.fc2 = nn.Linear(out_ch_fc1, out_ch_fc2)
self.fc3 = nn.Linear(out_ch_fc2, num_classes)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.pool1(x)
x = self.conv2(x)
x = F.relu(x)
x = self.pool2(x)
x = self.conv3(x)
x = F.relu(x)
x = self.conv4(x)
x = F.relu(x)
x = self.conv5(x)
x = F.relu(x)
x = self.pool5(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.drop1(x)
x = self.fc1(x)
x = F.relu(x)
x = self.drop2(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
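# Shape trace for the default 64x64 input, verifiable against the buffer
# shapes in the compiled call() above: conv1 -> (N, 64, 64, 64), pool1 ->
# (N, 64, 32, 32), conv2 -> (N, 256, 32, 32), pool2 -> (N, 256, 16, 16),
# conv3 -> (N, 384, 16, 16), conv4/conv5 -> (N, 256, 16, 16), pool5 ->
# (N, 256, 8, 8), avgpool -> (N, 256, 4, 4), flatten -> (N, 4096),
# fc3 -> (N, num_classes).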
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
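# Illustrative usage sketch (not part of the original module): build the model
# from get_init_inputs(), feed it get_inputs(), and optionally wrap it in
# torch.compile, which is the pipeline that produced the kernels above.
def _example_forward():
    init_args, init_kwargs = get_init_inputs()
    model = AlexNet(*init_args, **init_kwargs)
    x, = get_inputs()
    return model(x)  # logits of shape (4, num_classes)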
# Standalone listing of the same Triton kernels and call graph, stripped of
# the AOT harness boilerplate above.
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = yindex // 384
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 384 * x2 + 3456 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
x0 = xindex % 64
x1 = xindex // 64 % 32
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
x0 = xindex % 256
x1 = xindex // 256 % 16
x2 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 16384 * x2), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 16384 * x2), None)
tmp3 = tl.load(in_ptr0 + (8192 + x0 + 512 * x1 + 16384 * x2), None)
tmp5 = tl.load(in_ptr0 + (8448 + x0 + 512 * x1 + 16384 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 8
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_13(in_ptr0, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
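    # AdaptiveAvgPool2d((4, 4)) on an 8x8 input degenerates to plain 2x2
    # averaging (sum of four taps * 0.25). The store also converts the
    # channels-last buffer back to a contiguous NCHW layout for the matmul
    # that follows.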
ynumel = 64
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 4
y4 = yindex // 4
y2 = yindex // 16
y5 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x3 + 512 * y0 + 4096 * y4), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x3 + 512 * y0 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2048 + x3 + 512 * y0 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2304 + x3 + 512 * y0 + 4096 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (y5 + 16 * x3 + 4096 * y2), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (256, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (384, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (384,), (1,))
assert_size_stride(primals_8, (256, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (4096, 4096), (4096, 1))
assert_size_stride(primals_13, (4096,), (1,))
assert_size_stride(primals_14, (4096, 4096), (4096, 1))
assert_size_stride(primals_15, (4096,), (1,))
assert_size_stride(primals_16, (10, 4096), (4096, 1))
assert_size_stride(primals_17, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
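        # triton_poi_fused_0 through _5 repack the five conv weight tensors
        # and the input into channels-last strides so that the extern
        # convolutions below run in NHWC layout.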
buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((256, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_2[grid(16384, 25)](primals_4, buf2, 16384, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((384, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(98304, 9)](primals_6, buf3, 98304, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 384, 3, 3), (3456, 1, 1152, 384),
torch.float32)
triton_poi_fused_4[grid(98304, 9)](primals_8, buf4, 98304, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_5[grid(65536, 9)](primals_10, buf5, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_6[grid(1048576)](buf7, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(262144)](buf7, buf8,
buf9, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf8, buf2, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_8[grid(1048576)](buf11, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf12 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
buf13 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_9[grid(262144)](buf11,
buf12, buf13, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf14 = extern_kernels.convolution(buf12, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 384, 16, 16), (98304, 1, 6144, 384))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_10[grid(393216)](buf15, primals_7,
393216, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf16 = extern_kernels.convolution(buf15, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_11[grid(262144)](buf17, primals_9,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf18 = extern_kernels.convolution(buf17, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_11[grid(262144)](buf19,
primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_12[grid(65536)](buf19,
buf20, buf21, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf22 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
triton_poi_fused__adaptive_avg_pool2d_13[grid(64, 256)](buf20,
buf22, 64, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf23 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf22, (4, 4096), (4096, 1), 0
), reinterpret_tensor(primals_12, (4096, 4096), (1, 4096), 0),
out=buf23)
buf24 = buf23
del buf23
triton_poi_fused_relu_14[grid(16384)](buf24, primals_13, 16384,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf25 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf24, reinterpret_tensor(primals_14, (4096, 4096
), (1, 4096), 0), out=buf25)
buf26 = buf25
del buf25
triton_poi_fused_relu_14[grid(16384)](buf26, primals_15, 16384,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_15
buf27 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_17, buf26, reinterpret_tensor(
primals_16, (4096, 10), (1, 4096), 0), alpha=1, beta=1, out=buf27)
del primals_17
return (buf27, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf8, buf9,
buf11, buf12, buf13, buf15, buf17, buf19, buf20, buf21,
reinterpret_tensor(buf22, (4, 4096), (4096, 1), 0), buf24, buf26,
primals_16, primals_14, primals_12)
class AlexNetNew(nn.Module):
def __init__(self, num_classes=10, out_ch_conv1=64, out_ch_conv2=256,
out_ch_conv3=384, out_ch_conv4=256, out_ch_conv5=256, out_ch_fc1=
4096, out_ch_fc2=4096):
super(AlexNetNew, self).__init__()
self.conv1 = nn.Conv2d(3, out_ch_conv1, kernel_size=3, stride=1,
padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(out_ch_conv1, out_ch_conv2, kernel_size=5,
padding=2)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(out_ch_conv2, out_ch_conv3, kernel_size=3,
padding=1)
self.conv4 = nn.Conv2d(out_ch_conv3, out_ch_conv4, kernel_size=3,
padding=1)
self.conv5 = nn.Conv2d(out_ch_conv4, out_ch_conv5, kernel_size=3,
padding=1)
self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((4, 4))
self.drop1 = nn.Dropout()
self.fc1 = nn.Linear(out_ch_conv5 * 4 * 4, out_ch_fc1)
self.drop2 = nn.Dropout()
self.fc2 = nn.Linear(out_ch_fc1, out_ch_fc2)
self.fc3 = nn.Linear(out_ch_fc2, num_classes)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.fc1.weight
primals_13 = self.fc1.bias
primals_14 = self.fc2.weight
primals_15 = self.fc2.bias
primals_16 = self.fc3.weight
primals_17 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
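# Usage sketch (assumes a CUDA device, since the compiled kernels are CUDA
# specific): call() asserts a (4, 3, 64, 64) input, e.g.
#     AlexNetNew().cuda()(torch.rand(4, 3, 64, 64, device='cuda'))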
| FujitsuLaboratories/CAC | AlexNet | false | 17,418 | ["Apache-2.0"] | 8 | d12df8e47f61eaf7d7b0ed355e2d1aa296453f86 | https://github.com/FujitsuLaboratories/CAC/tree/d12df8e47f61eaf7d7b0ed355e2d1aa296453f86 |
nnConv2dSymQuant | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4m/c4m6ijw5w65uf2rvi63g47tzv5ahnd6mqv24xrkrpcdh4msjid6c.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 9) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 144, grid=grid(144), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class SymmetricQuantizeDequantize(torch.autograd.Function):
@staticmethod
def forward(ctx, input, precision, clamp_val):
ctx.save_for_backward(input)
"""
Compute quantization step size. Mapping (-max_val, max_val) linearly to (-127,127)
"""
use_max = True
if use_max:
max_val = torch.max(torch.abs(input))
else:
max_val = clamp_val
delta = max_val / (2 ** (precision - 1) - 1)
input_clamped = torch.clamp(input, -max_val, max_val)
input_q = torch.round(input_clamped / delta)
        # input_q already holds the rounded integer code; the per-precision
        # branches in the source (8 / 16 / other) were identical no-ops.
        """
        Dequantize, introducing a quantization error in the data
        """
        input_dq = input_q * delta
return input.copy_(input_dq)
@staticmethod
def backward(ctx, grad_output):
_input, = ctx.saved_tensors
return grad_output, None
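# Worked example (illustrative sketch, not part of the original module; the
# helper name is an assumption): for 8-bit precision with max_val = 0.5,
# delta = 0.5 / (2**7 - 1) = 0.5 / 127, so a weight of 0.25 quantizes to
# round(0.25 / delta) = 64 and dequantizes to 64 * delta, about 0.2520,
# i.e. a quantization error of roughly 0.002.
def demo_symmetric_quant(w, precision=8):
    # Reproduce the forward math outside autograd, without mutating w.
    max_val = torch.max(torch.abs(w))
    delta = max_val / (2 ** (precision - 1) - 1)
    return torch.round(torch.clamp(w, -max_val, max_val) / delta) * delta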
class nnConv2dSymQuant(nn.Conv2d):
"""
Computes 2d conv output
Weights are quantized and dequantized introducing a quantization error
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=1, dilation=1, groups=1, bias=True, padding_mode='zeros',
precision=-1, clamp_val=0.5):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super().__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias, padding_mode)
self.precision = precision
self.clamp_val = clamp_val
def conv2d_forward(self, input, weight):
return F.conv2d(input, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def forward(self, input):
if self.precision > 0:
quantWeight = SymmetricQuantizeDequantize.apply
quantWeight(self.weight, self.precision, self.clamp_val)
return self.conv2d_forward(input, self.weight)
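# Usage sketch (assumed call pattern): nnConv2dSymQuant(3, 16, kernel_size=3,
# precision=8)(x) quantize-dequantizes self.weight in place on every forward,
# so repeated calls re-quantize the already-quantized weights.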
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(144)](buf1, primals_2, 144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class SymmetricQuantizeDequantize(torch.autograd.Function):
@staticmethod
def forward(ctx, input, precision, clamp_val):
ctx.save_for_backward(input)
"""
Compute quantization step size. Mapping (-max_val, max_val) linearly to (-127,127)
"""
use_max = True
if use_max:
max_val = torch.max(torch.abs(input))
else:
max_val = clamp_val
delta = max_val / (2 ** (precision - 1) - 1)
input_clamped = torch.clamp(input, -max_val, max_val)
input_q = torch.round(input_clamped / delta)
        # input_q already holds the rounded integer code; the per-precision
        # branches in the source (8 / 16 / other) were identical no-ops.
        """
        Dequantize, introducing a quantization error in the data
        """
        input_dq = input_q * delta
return input.copy_(input_dq)
@staticmethod
def backward(ctx, grad_output):
_input, = ctx.saved_tensors
return grad_output, None
class nnConv2dSymQuantNew(nn.Conv2d):
"""
Computes 2d conv output
Weights are quantized and dequantized introducing a quantization error
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=1, dilation=1, groups=1, bias=True, padding_mode='zeros',
precision=-1, clamp_val=0.5):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super().__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias, padding_mode)
self.precision = precision
self.clamp_val = clamp_val
def conv2d_forward(self, input, weight):
return F.conv2d(input, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
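    # Note: get_init_inputs() leaves precision at its default of -1, so the
    # compiled graph below traces only the unquantized convolution path.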
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| IBM/energy-efficient-resilience | nnConv2dSymQuant | false | 17,419 | ["Apache-2.0"] | 4 | 13dfcac143df218abe20ed8d8752a0bd7e5a424b | https://github.com/IBM/energy-efficient-resilience/tree/13dfcac143df218abe20ed8d8752a0bd7e5a424b |
NonAttentiveTacotronLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/gd/cgdngayxhe65zupjzuqvg6xlst6dyiedqweof527uphnysletqre.py
# Topologically Sorted Source Nodes: [prenet_l1, prenet_l2, loss_prenet, postnet_l1, postnet_l2, loss_postnet], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mse_loss, aten.add]
# Source node to ATen node mapping:
# loss_postnet => add_1
# loss_prenet => add
# postnet_l1 => abs_2, mean_2, sub_2
# postnet_l2 => mean_3, pow_2, sub_3
# prenet_l1 => abs_1, mean, sub
# prenet_l2 => mean_1, pow_1, sub_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg0_1), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_2,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg0_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_2, %mean_3), kwargs = {})
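# In effect this fused reduction computes, for each prediction x against the
# shared target t, loss = mean(|x - t|) + mean((x - t)**2) over all
# 4*4*4*4 = 256 elements, once for the prenet mels and once for the postnet
# mels, in a single kernel launch.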
triton_per_fused_abs_add_mean_mse_loss_sub_0 = async_compile.triton('triton_per_fused_abs_add_mean_mse_loss_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_mean_mse_loss_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_mean_mse_loss_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp11 = tl.load(in_ptr2 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp2 * tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp12 = tmp11 - tmp1
tmp13 = tl_math.abs(tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp12 * tmp12
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = 256.0
tmp22 = tmp16 / tmp21
tmp23 = tmp20 / tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp6 / tmp21
tmp26 = tmp10 / tmp21
tmp27 = tmp25 + tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp24, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp27, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vv/cvv6v4ldb5d4tfcj5obc7jb5fgmoehwgglvbfezlkujwpzc3zxns.py
# Topologically Sorted Source Nodes: [mul, model_durations, mul_1, target_durations, loss_durations], Original ATen: [aten.mul, aten.div, aten.mse_loss]
# Source node to ATen node mapping:
# loss_durations => mean_4, pow_3, sub_4
# model_durations => div
# mul => mul
# mul_1 => mul_1
# target_durations => div_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg3_1, 4), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg4_1, 4), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 4), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_4, 2), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
triton_per_fused_div_mse_loss_mul_1 = async_compile.triton('triton_per_fused_div_mse_loss_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mse_loss_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mse_loss_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp1 = 4.0
tmp2 = tmp0 * tmp1
tmp3 = 0.25
tmp4 = tmp2 * tmp3
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp3
tmp8 = tmp4 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
buf6 = buf2; del buf2 # reuse
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [prenet_l1, prenet_l2, loss_prenet, postnet_l1, postnet_l2, loss_postnet], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mse_loss, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_mean_mse_loss_sub_0.run(buf6, buf5, arg1_1, arg0_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [mul, model_durations, mul_1, target_durations, loss_durations], Original ATen: [aten.mul, aten.div, aten.mse_loss]
triton_per_fused_div_mse_loss_mul_1.run(buf7, arg3_1, arg4_1, 1, 256, grid=grid(1), stream=stream0)
del arg3_1
del arg4_1
return (buf5, buf6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class NonAttentiveTacotronLoss(nn.Module):
def __init__(self, sample_rate: 'int', hop_size: 'int'):
super(NonAttentiveTacotronLoss, self).__init__()
self.sample_rate = sample_rate
self.hop_size = hop_size
def forward(self, prenet_mels, postnet_mels, model_durations,
target_durations, target_mels):
target_mels.requires_grad = False
target_durations.requires_grad = False
prenet_l1 = F.l1_loss(prenet_mels, target_mels)
prenet_l2 = F.mse_loss(prenet_mels, target_mels)
postnet_l1 = F.l1_loss(postnet_mels, target_mels)
postnet_l2 = F.mse_loss(postnet_mels, target_mels)
loss_prenet = prenet_l1 + prenet_l2
loss_postnet = postnet_l1 + postnet_l2
model_durations = model_durations * self.hop_size / self.sample_rate
target_durations = target_durations * self.hop_size / self.sample_rate
loss_durations = F.mse_loss(model_durations, target_durations)
return loss_prenet, loss_postnet, loss_durations
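# Note: durations are rescaled from frames to seconds (* hop_size /
# sample_rate) before the MSE, so loss_durations penalizes timing errors in
# seconds rather than in frame counts.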
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'sample_rate': 4, 'hop_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_mean_mse_loss_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
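    # Persistent single-pass reduction over all 256 elements: in_ptr1 holds
    # the tensor shared by both residuals (the target mels); in_ptr0 and
    # in_ptr2 hold the two predictions. in_out_ptr1 receives the L1 + MSE
    # means for the in_ptr0 pair and in_out_ptr0 those for the in_ptr2 pair.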
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp11 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp2 * tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp12 = tmp11 - tmp1
tmp13 = tl_math.abs(tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp12 * tmp12
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = 256.0
tmp22 = tmp16 / tmp21
tmp23 = tmp20 / tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp6 / tmp21
tmp26 = tmp10 / tmp21
tmp27 = tmp25 + tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp27, None)
@triton.jit
def triton_per_fused_div_mse_loss_mul_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
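    # The scale factors below bake in the traced config from get_init_inputs
    # (hop_size=4, sample_rate=4): durations are multiplied by 4.0 and then by
    # 0.25 (i.e. divided by 4) before the squared-difference mean over 256
    # elements.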
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = 4.0
tmp2 = tmp0 * tmp1
tmp3 = 0.25
tmp4 = tmp2 * tmp3
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp3
tmp8 = tmp4 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
buf6 = buf2
del buf2
buf5 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_mean_mse_loss_sub_0[grid(1)](buf6, buf5,
arg1_1, arg0_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf7 = buf4
del buf4
triton_per_fused_div_mse_loss_mul_1[grid(1)](buf7, arg3_1, arg4_1,
1, 256, num_warps=2, num_stages=1)
del arg3_1
del arg4_1
return buf5, buf6, buf7
class NonAttentiveTacotronLossNew(nn.Module):
def __init__(self, sample_rate: 'int', hop_size: 'int'):
super(NonAttentiveTacotronLossNew, self).__init__()
self.sample_rate = sample_rate
self.hop_size = hop_size
def forward(self, input_0, input_1, input_2, input_3, input_4):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return output[0], output[1], output[2]
| IMDxD/NonAttentiveTacotron | NonAttentiveTacotronLoss | false | 17,420 | ["MIT"] | 4 | a227fba1bdfa4c5ec63a0f0364313f3ac0fef1ba | https://github.com/IMDxD/NonAttentiveTacotron/tree/a227fba1bdfa4c5ec63a0f0364313f3ac0fef1ba |
layer_1_to_2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/xm/cxmem2yh3ndaaagwlq6cmqgbohkdraaqwqlsaqyw55muaxots357.py
# Topologically Sorted Source Nodes: [op1, op2, op5_1, op2_1, op5_2], Original ATen: [aten.diag_embed, aten.cat, aten.div]
# Source node to ATen node mapping:
# op1 => full_default
# op2 => where_1
# op2_1 => div
# op5_1 => cat_2
# op5_2 => div_1
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view, %permute_1, %full_default), kwargs = {})
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_15, %unsqueeze_15, %unsqueeze_15, %unsqueeze_15], 3), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%where_1, 4), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cat_2, 4), kwargs = {})
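# The kernels below materialize the stacked basis operations (op1..op5) named
# in the graph fragment: a diag-embed of the input features plus row/column
# broadcasts and normalized totals (the / 4 divisions average over the node
# dimension). This structure is consistent with a first-to-second-order
# equivariant linear layer, as the module name layer_1_to_2 suggests.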
triton_poi_fused_cat_diag_embed_div_0 = async_compile.triton('triton_poi_fused_cat_diag_embed_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_diag_embed_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_diag_embed_div_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex % 16
x1 = (xindex // 4) % 4
tmp59 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr0 + (4*x2), tmp17 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tl.load(in_ptr0 + (1 + (4*x2)), tmp17 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.load(in_ptr0 + (2 + (4*x2)), tmp17 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.load(in_ptr0 + (3 + (4*x2)), tmp17 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp17, tmp24, tmp25)
tmp27 = tmp0 >= tmp15
tmp28 = tl.full([1], 3, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr0 + (4*x2), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tl.load(in_ptr0 + (1 + (4*x2)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.load(in_ptr0 + (2 + (4*x2)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.load(in_ptr0 + (3 + (4*x2)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp30, tmp37, tmp38)
tmp40 = tmp0 >= tmp28
tmp41 = tl.full([1], 4, tl.int64)
tmp42 = tmp0 < tmp41
tmp43 = tl.load(in_ptr0 + (4*x2), tmp40 & xmask, eviction_policy='evict_last', other=0.0)
tmp44 = tl.load(in_ptr0 + (1 + (4*x2)), tmp40 & xmask, eviction_policy='evict_last', other=0.0)
tmp45 = tmp43 + tmp44
tmp46 = tl.load(in_ptr0 + (2 + (4*x2)), tmp40 & xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tmp45 + tmp46
tmp48 = tl.load(in_ptr0 + (3 + (4*x2)), tmp40 & xmask, eviction_policy='evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp40, tmp49, tmp50)
tmp52 = tl.where(tmp30, tmp39, tmp51)
tmp53 = tl.where(tmp17, tmp26, tmp52)
tmp54 = tl.where(tmp4, tmp13, tmp53)
tmp55 = 0.25
tmp56 = tmp54 * tmp55
tmp57 = x1
tmp58 = tmp0 == tmp57
tmp61 = tmp59 + tmp60
tmp63 = tmp61 + tmp62
tmp65 = tmp63 + tmp64
tmp66 = 0.0
tmp67 = tl.where(tmp58, tmp65, tmp66)
tmp68 = tmp67 * tmp55
tl.store(out_ptr0 + (x3 + (80*x2)), tmp56, xmask)
tl.store(out_ptr1 + (x3 + (80*x2)), tmp68, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vb/cvbgs6nqka4z7ntatxihcdrnpfjz5m4qm5lhmojrtsiobndjf4ca.py
# Topologically Sorted Source Nodes: [op1, op3], Original ATen: [aten.diag_embed, aten.cat]
# Source node to ATen node mapping:
# op1 => full_default, where
# op3 => cat
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view, %permute, %full_default), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_1, %unsqueeze_1, %unsqueeze_1, %unsqueeze_1], 2), kwargs = {})
triton_poi_fused_cat_diag_embed_1 = async_compile.triton('triton_poi_fused_cat_diag_embed_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_diag_embed_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_diag_embed_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex % 16
tmp3 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.full([1], 0, tl.int64)
tmp7 = tmp1 >= tmp6
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp1 >= tmp8
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp1 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp1 >= tmp12
tmp17 = tl.full([1], 3, tl.int64)
tmp18 = tmp1 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp1 >= tmp17
tmp22 = tl.full([1], 4, tl.int64)
tmp23 = tmp1 < tmp22
tmp24 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp21 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp19, tmp20, tmp24)
tmp26 = tl.where(tmp14, tmp15, tmp25)
tmp27 = tl.where(tmp9, tmp10, tmp26)
tl.store(out_ptr0 + (x3 + (80*x2)), tmp5, xmask)
tl.store(out_ptr1 + (x3 + (80*x2)), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vu/cvumof45irl6wy5v6uvifyskuiqom47picklreltshkxoj7wnebv.py
# Topologically Sorted Source Nodes: [op4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# op4 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_10, %unsqueeze_10, %unsqueeze_10, %unsqueeze_10], 3), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x2 = (xindex // 16)
x4 = xindex % 16
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x3), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x3), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr0 + (x3), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x4 + (80*x2)), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/kn/ckn2gtetkygqcdcsa2b3gpuwjbom5uafwmz7pzeekvb7oxtw4rzy.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# output => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_3, [5], True), kwargs = {})
triton_poi_fused_sum_3 = async_compile.triton('triton_poi_fused_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 20
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 5
y1 = (yindex // 5)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y0) + (320*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (80 + x2 + (16*y0) + (320*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (160 + x2 + (16*y0) + (320*y1)), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (240 + x2 + (16*y0) + (320*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (y0 + (5*x2) + (80*y1)), tmp6, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vh/cvh3jm2gsrjlmmnf4h2bylqqd33vvgjyqs3kbe4oiof5cx52x3k6.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# output_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_8, %primals_3), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 1, 5), (5, 5, 1))
assert_size_stride(primals_3, (1, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((4, 4, 20, 4), (320, 80, 4, 1), torch.float32)
buf0 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 64) # alias
buf2 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 16) # alias
# Topologically Sorted Source Nodes: [op1, op2, op5_1, op2_1, op5_2], Original ATen: [aten.diag_embed, aten.cat, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_diag_embed_div_0.run(primals_1, buf0, buf2, 256, grid=grid(256), stream=stream0)
buf1 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 0) # alias
buf3 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 32) # alias
# Topologically Sorted Source Nodes: [op1, op3], Original ATen: [aten.diag_embed, aten.cat]
triton_poi_fused_cat_diag_embed_1.run(primals_1, buf1, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 48) # alias
# Topologically Sorted Source Nodes: [op4], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_1, buf4, 256, grid=grid(256), stream=stream0)
del primals_1
buf6 = empty_strided_cuda((4, 1, 4, 4, 5, 1), (80, 80, 20, 5, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sum]
triton_poi_fused_sum_3.run(buf5, buf6, 20, 16, grid=grid(20, 16), stream=stream0)
del buf0
del buf1
del buf2
del buf3
del buf4
del buf5
buf7 = empty_strided_cuda((1, 1, 64), (64, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, reinterpret_tensor(buf6, (1, 5, 64), (0, 1, 5), 0), out=buf7)
del primals_2
buf8 = reinterpret_tensor(buf7, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf8, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf8, reinterpret_tensor(buf6, (1, 64, 5), (5, 5, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 1, 5), (5, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
def contractions_1_to_2(inputs, dim, normalization='inf', normalization_val=1.0):
    sum_all = torch.sum(inputs, dim=2).unsqueeze(dim=2)
    op1 = torch.diag_embed(inputs, dim1=2, dim2=3)
    op2 = torch.diag_embed(torch.cat([sum_all for d in range(dim)], dim=2),
        dim1=2, dim2=3)
    op3 = torch.cat([torch.unsqueeze(inputs, dim=2) for d in range(dim)], dim=2)
    op4 = torch.cat([torch.unsqueeze(inputs, dim=3) for d in range(dim)], dim=3)
    op5 = torch.cat([sum_all for d in range(dim)], dim=2)
    op5 = torch.cat([op5.unsqueeze(dim=3) for d in range(dim)], dim=3)
if normalization is not None:
if normalization == 'inf':
op2 = op2 / dim
op5 = op5 / dim
return [op1, op2, op3, op4, op5]
class layer_1_to_2(nn.Module):
"""
:param name: name of layer
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m tensor
:return: output: N x S x m x m tensor
"""
def __init__(self, input_depth, output_depth, normalization='inf',
normalization_val=1.0, device='cpu'):
super().__init__()
self.input_depth = input_depth
self.output_depth = output_depth
self.normalization = normalization
self.normalization_val = normalization_val
self.device = device
self.basis_dimension = 5
        self.coeffs = torch.nn.Parameter(torch.randn(self.input_depth,
            self.output_depth, self.basis_dimension) * np.sqrt(2.0) / (
            self.input_depth + self.output_depth), requires_grad=True)
self.bias = torch.nn.Parameter(torch.zeros(1, self.output_depth, 1, 1))
def forward(self, inputs):
m = inputs.size(2)
        ops_out = contractions_1_to_2(inputs, m, normalization=self.normalization)
ops_out = torch.stack(ops_out, dim=2)
output = torch.einsum('dsb,ndbij->nsij', self.coeffs, ops_out)
output = output + self.bias
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_depth': 1, 'output_depth': 1}]
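# Editorial sketch, not part of the original repo: every one of the five
# contractions maps an N x D x m tensor to N x D x m x m, and the layer mixes
# the stacked basis with its learned (D, S, 5) coefficient tensor.
def _layer_1_to_2_example():
    x = torch.rand(4, 4, 4)                          # N x D x m
    ops = contractions_1_to_2(x, dim=x.size(2))
    assert all(op.shape == (4, 4, 4, 4) for op in ops)
    out = layer_1_to_2(input_depth=4, output_depth=2)(x)
    assert out.shape == (4, 2, 4, 4)                 # N x S x m x m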
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_diag_embed_div_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
tmp59 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp62 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp64 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x2, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr0 + 4 * x2, tmp17 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tl.load(in_ptr0 + (1 + 4 * x2), tmp17 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.load(in_ptr0 + (2 + 4 * x2), tmp17 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.load(in_ptr0 + (3 + 4 * x2), tmp17 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp17, tmp24, tmp25)
tmp27 = tmp0 >= tmp15
tmp28 = tl.full([1], 3, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr0 + 4 * x2, tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tl.load(in_ptr0 + (1 + 4 * x2), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.load(in_ptr0 + (2 + 4 * x2), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.load(in_ptr0 + (3 + 4 * x2), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp30, tmp37, tmp38)
tmp40 = tmp0 >= tmp28
tl.full([1], 4, tl.int64)
tmp43 = tl.load(in_ptr0 + 4 * x2, tmp40 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp44 = tl.load(in_ptr0 + (1 + 4 * x2), tmp40 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp45 = tmp43 + tmp44
tmp46 = tl.load(in_ptr0 + (2 + 4 * x2), tmp40 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp47 = tmp45 + tmp46
tmp48 = tl.load(in_ptr0 + (3 + 4 * x2), tmp40 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp40, tmp49, tmp50)
tmp52 = tl.where(tmp30, tmp39, tmp51)
tmp53 = tl.where(tmp17, tmp26, tmp52)
tmp54 = tl.where(tmp4, tmp13, tmp53)
tmp55 = 0.25
tmp56 = tmp54 * tmp55
tmp57 = x1
tmp58 = tmp0 == tmp57
tmp61 = tmp59 + tmp60
tmp63 = tmp61 + tmp62
tmp65 = tmp63 + tmp64
tmp66 = 0.0
tmp67 = tl.where(tmp58, tmp65, tmp66)
tmp68 = tmp67 * tmp55
tl.store(out_ptr0 + (x3 + 80 * x2), tmp56, xmask)
tl.store(out_ptr1 + (x3 + 80 * x2), tmp68, xmask)
@triton.jit
def triton_poi_fused_cat_diag_embed_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex % 16
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp0 = x0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.full([1], 0, tl.int64)
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp1 >= tmp8
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp1 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp14 & xmask, eviction_policy
='evict_last', other=0.0)
tmp16 = tmp1 >= tmp12
tmp17 = tl.full([1], 3, tl.int64)
tmp18 = tmp1 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp19 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp1 >= tmp17
tl.full([1], 4, tl.int64)
tmp24 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp21 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tl.where(tmp19, tmp20, tmp24)
tmp26 = tl.where(tmp14, tmp15, tmp25)
tmp27 = tl.where(tmp9, tmp10, tmp26)
tl.store(out_ptr0 + (x3 + 80 * x2), tmp5, xmask)
tl.store(out_ptr1 + (x3 + 80 * x2), tmp27, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x2 = xindex // 16
x4 = xindex % 16
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + x3, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + x3, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr0 + x3, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x4 + 80 * x2), tmp22, xmask)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 20
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 5
y1 = yindex // 5
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y0 + 320 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (80 + x2 + 16 * y0 + 320 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (160 + x2 + 16 * y0 + 320 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (240 + x2 + 16 * y0 + 320 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (y0 + 5 * x2 + 80 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 1, 5), (5, 5, 1))
assert_size_stride(primals_3, (1, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((4, 4, 20, 4), (320, 80, 4, 1), torch.float32
)
buf0 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 64)
buf2 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 16)
get_raw_stream(0)
triton_poi_fused_cat_diag_embed_div_0[grid(256)](primals_1, buf0,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf1 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 0)
buf3 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 32)
triton_poi_fused_cat_diag_embed_1[grid(256)](primals_1, buf1, buf3,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf5, (4, 4, 4, 4), (320, 80, 4, 1), 48)
triton_poi_fused_cat_2[grid(256)](primals_1, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf6 = empty_strided_cuda((4, 1, 4, 4, 5, 1), (80, 80, 20, 5, 1, 1),
torch.float32)
triton_poi_fused_sum_3[grid(20, 16)](buf5, buf6, 20, 16, XBLOCK=16,
YBLOCK=32, num_warps=4, num_stages=1)
del buf0
del buf1
del buf2
del buf3
del buf4
del buf5
buf7 = empty_strided_cuda((1, 1, 64), (64, 64, 1), torch.float32)
extern_kernels.bmm(primals_2, reinterpret_tensor(buf6, (1, 5, 64),
(0, 1, 5), 0), out=buf7)
del primals_2
buf8 = reinterpret_tensor(buf7, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf7
triton_poi_fused_add_4[grid(64)](buf8, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf8, reinterpret_tensor(buf6, (1, 64, 5), (5, 5, 1), 0)
def contractions_1_to_2(inputs, dim, normalization='inf', normalization_val=1.0):
    sum_all = torch.sum(inputs, dim=2).unsqueeze(dim=2)
    op1 = torch.diag_embed(inputs, dim1=2, dim2=3)
    op2 = torch.diag_embed(torch.cat([sum_all for d in range(dim)], dim=2),
        dim1=2, dim2=3)
    op3 = torch.cat([torch.unsqueeze(inputs, dim=2) for d in range(dim)], dim=2)
    op4 = torch.cat([torch.unsqueeze(inputs, dim=3) for d in range(dim)], dim=3)
    op5 = torch.cat([sum_all for d in range(dim)], dim=2)
    op5 = torch.cat([op5.unsqueeze(dim=3) for d in range(dim)], dim=3)
if normalization is not None:
if normalization == 'inf':
op2 = op2 / dim
op5 = op5 / dim
return [op1, op2, op3, op4, op5]
class layer_1_to_2New(nn.Module):
"""
:param name: name of layer
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m tensor
:return: output: N x S x m x m tensor
"""
def __init__(self, input_depth, output_depth, normalization='inf',
normalization_val=1.0, device='cpu'):
super().__init__()
self.input_depth = input_depth
self.output_depth = output_depth
self.normalization = normalization
self.normalization_val = normalization_val
self.device = device
self.basis_dimension = 5
        self.coeffs = torch.nn.Parameter(torch.randn(self.input_depth,
            self.output_depth, self.basis_dimension) * np.sqrt(2.0) / (
            self.input_depth + self.output_depth), requires_grad=True)
self.bias = torch.nn.Parameter(torch.zeros(1, self.output_depth, 1, 1))
def forward(self, input_0):
primals_2 = self.coeffs
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
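# Editorial self-check sketch, not from the original repo. It assumes a CUDA
# device (call() launches the Triton kernels above) and the (4, 4, 4) input
# shape the graph was traced for: the fused forward should match an eager
# recomputation built from contractions_1_to_2.
def _check_fused_layer_1_to_2():
    module = layer_1_to_2New(input_depth=1, output_depth=1).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    ops = torch.stack(contractions_1_to_2(x, x.size(2)), dim=2)
    reference = torch.einsum('dsb,ndbij->nsij', module.coeffs, ops) + module.bias
    torch.testing.assert_close(module(x), reference, rtol=1e-4, atol=1e-5)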
| HyTruongSon/InvariantGraphNetworks-PyTorch | layer_1_to_2 | false | 17,421 | [
"Apache-2.0"
] | 7 | da9fdaa4f858d6fcae14b08a59d4b172a2aabaf8 | https://github.com/HyTruongSon/InvariantGraphNetworks-PyTorch/tree/da9fdaa4f858d6fcae14b08a59d4b172a2aabaf8 |
MRAE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ed/cedctu3l7gd6f64jitgom5cb2jria4lpcnyfpjsr2xvu66opllae.py
# Topologically Sorted Source Nodes: [sub, abs_1, add, relative_diff, mean], Original ATen: [aten.sub, aten.abs, aten.add, aten.div, aten.mean]
# Source node to ATen node mapping:
# abs_1 => abs_1
# add => add
# mean => mean
# relative_diff => div
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1.5259021896696422e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, %add), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
triton_per_fused_abs_add_div_mean_sub_0 = async_compile.triton('triton_per_fused_abs_add_div_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.5259021896696422e-05
tmp5 = tmp1 + tmp4
tmp6 = tmp3 / tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, abs_1, add, relative_diff, mean], Original ATen: [aten.sub, aten.abs, aten.add, aten.div, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MRAE(nn.Module):
def __init__(self):
super(MRAE, self).__init__()
def forward(self, output, target, mask=None):
relative_diff = torch.abs(output - target) / (target + 1.0 / 65535.0)
if mask is not None:
relative_diff = mask * relative_diff
return torch.mean(relative_diff)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
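# Worked example (editorial addition): with the 1/65535 epsilon guarding the
# division, MRAE of these two vectors averages |0.1|/0.500015 and
# |0.2|/1.000015, i.e. very close to 0.2.
def _mrae_example():
    loss = MRAE()(torch.tensor([0.6, 0.8]), torch.tensor([0.5, 1.0]))
    assert abs(loss.item() - 0.2) < 1e-3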
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.5259021896696422e-05
tmp5 = tmp1 + tmp4
tmp6 = tmp3 / tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_mean_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MRAENew(nn.Module):
def __init__(self):
super(MRAENew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
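# Editorial note: the graph above was traced with mask=None, so MRAENew takes
# exactly two inputs and masked evaluation still needs the eager MRAE. A
# hedged consistency check of the unmasked path (assumes a CUDA device):
def _check_fused_mrae():
    out = torch.rand(4, 4, 4, 4, device='cuda')
    tgt = torch.rand(4, 4, 4, 4, device='cuda')
    eager = torch.mean(torch.abs(out - tgt) / (tgt + 1.0 / 65535.0))
    torch.testing.assert_close(MRAENew()(out, tgt), eager, rtol=1e-5, atol=1e-6)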
| IVRL/Multi-Modal-Spectral-Image-Super-Resolution | MRAE | false | 17,422 | [
"MIT"
] | 9 | 6afe35c16d4cc2466e5eb51f3ddc39b43f6f765e | https://github.com/IVRL/Multi-Modal-Spectral-Image-Super-Resolution/tree/6afe35c16d4cc2466e5eb51f3ddc39b43f6f765e |
GLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/vk/cvkt24fhz5lbywfxx4mvpn26tvdwo2fctjm47s2jajq2tb4alrz3.py
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class GLU(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
out, gate = x.chunk(2, dim=self.dim)
return out * gate.sigmoid()
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
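# Editorial check, not part of the original file: chunk(2, dim) followed by
# out * gate.sigmoid() is exactly F.glu, and the gated dimension is halved.
def _glu_example():
    x = torch.rand(4, 4, 4, 4, 4)
    y = GLU(dim=4)(x)
    assert y.shape == (4, 4, 4, 4, 2)
    assert torch.allclose(y, nn.functional.glu(x, dim=4))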
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(512)](arg0_1, buf0, 512, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GLUNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
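# Editorial note on triton_poi_fused_mul_sigmoid_0: the chunk along the last
# dimension is folded into the load offsets -- `x0 + 4 * x1` reads the `out`
# half and `2 + x0 + 4 * x1` reads the `gate` half of each size-4 row -- so
# the fused kernel needs no separate slicing pass. The launch is specialized
# to the traced 4**5 input (512 output elements).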
| IIP-Sogang/Audio-Visual-Speech-Recognition | GLU | false | 17,423 | [
"MIT"
] | 9 | bd03be91135acbc6162b83092d462b7fe71dd007 | https://github.com/IIP-Sogang/Audio-Visual-Speech-Recognition/tree/bd03be91135acbc6162b83092d462b7fe71dd007 |
tofp16 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/qs/cqsitslx774fn4tm47bknp7iambphdxldbpgnoby4usp66iq4svq.py
# Topologically Sorted Source Nodes: [half], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# half => convert_element_type
# Graph fragment:
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg0_1, torch.float16), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp16', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0.to(tl.float32)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16)
# Topologically Sorted Source Nodes: [half], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
class tofp16(nn.Module):
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
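# Editorial example: the module is a bare dtype cast, handy as a drop-in
# stage when feeding fp16 pipelines.
def _tofp16_example():
    y = tofp16()(torch.rand(4, 4, 4, 4))
    assert y.dtype == torch.float16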
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.float32)
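    # Editorial note: out_ptr0 points at the fp16 buffer allocated in call(),
    # so the store below implicitly downcasts tmp1 to fp16; the explicit
    # .to(tl.float32) above only normalizes the loaded value's dtype first.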
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class tofp16New(nn.Module):
def __init__(self):
super(tofp16New, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Icep2020/CrowdGAN | tofp16 | false | 17,424 | [
"MIT"
] | 7 | 4adebaa09460f2f8296d368ffeba03f32c963d4d | https://github.com/Icep2020/CrowdGAN/tree/4adebaa09460f2f8296d368ffeba03f32c963d4d |
GlobalAveragePooling2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/mi/cmi6un5vceleekpu35k7f32kl344t42s5sgsx6ate7i565wrhmw6.py
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# mean_1 => mean_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [-1]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + (x0), tmp36, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as pt
import torch.nn as nn
class GlobalAveragePooling2d(nn.Module):
"""class for performing global average pooling on 2d feature maps"""
def forward(self, x):
"""
calculates the average of each feature map in the tensor
:param x: input tensor of shape [batch, channels, height, width]
        :return: tensor that contains the average for each channel, shape [batch, channels, 1, 1]
"""
return pt.mean(pt.mean(x, -1), -1)[..., None, None]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
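# Editorial check, not part of the original repo: the nested means over the
# two trailing dims equal a single mean over dims (-2, -1), i.e. adaptive
# average pooling to 1x1.
def _gap_example():
    x = torch.rand(4, 4, 4, 4)
    y = GlobalAveragePooling2d()(x)
    assert y.shape == (4, 4, 1, 1)
    assert torch.allclose(y, x.mean(dim=(-2, -1), keepdim=True))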
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x0, tmp36, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0),
class GlobalAveragePooling2dNew(nn.Module):
"""class for performing global average pooling on 2d feature maps"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
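# Editorial note: triton_poi_fused_mean_0 fully unrolls the 4x4 spatial
# average into sixteen scalar loads per (batch, channel) element, mirroring
# the nested means above (four row means, then their mean), so the kernel is
# specialized to the traced 4x4x4x4 input shape.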
| IljaManakov/Autoencoders | GlobalAveragePooling2d | false | 17,425 | [
"MIT"
] | 4 | bd2ccc6decda37a004cc57a41dcd406752c21d61 | https://github.com/IljaManakov/Autoencoders/tree/bd2ccc6decda37a004cc57a41dcd406752c21d61 |
ComplexConvTranspose2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/sd/csdiltakaqdisc5ntg7h3eesi6k7gotidef2no4d6isr3gvriood.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# output => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_4, %unsqueeze_5], -1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 392
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = (xindex // 2)
x2 = (xindex // 98)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr2 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr3 + (x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tmp7 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr4 + (x3), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr5 + (x3), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + (x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + (x4), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 7, 7), (196, 49, 7, 1))
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 7, 7), (196, 49, 7, 1))
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 7, 7), (196, 49, 7, 1))
# Topologically Sorted Source Nodes: [conv_transpose2d_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 7, 7), (196, 49, 7, 1))
buf4 = empty_strided_cuda((4, 7, 7, 2), (98, 14, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(buf0, primals_3, buf1, primals_5, buf2, buf3, buf4, 392, grid=grid(392), stream=stream0)
del buf0
del buf1
del buf2
del buf3
del primals_3
del primals_5
return (buf4, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 0), reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 1), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class ComplexConvTranspose2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, output_padding=0, dilation=1, groups=1, bias=True, **kwargs
):
super().__init__()
self.tconv_re = nn.ConvTranspose2d(in_channel, out_channel,
kernel_size=kernel_size, stride=stride, padding=padding,
output_padding=output_padding, groups=groups, bias=bias,
dilation=dilation, **kwargs)
self.tconv_im = nn.ConvTranspose2d(in_channel, out_channel,
kernel_size=kernel_size, stride=stride, padding=padding,
output_padding=output_padding, groups=groups, bias=bias,
dilation=dilation, **kwargs)
def forward(self, x):
real = self.tconv_re(x[..., 0]) - self.tconv_im(x[..., 1])
imaginary = self.tconv_re(x[..., 1]) + self.tconv_im(x[..., 0])
output = torch.stack((real, imaginary), dim=-1)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
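# Minimal shape sketch (added for illustration, not from the source repo); it
# assumes a torch build that accepts the unbatched 3-D conv inputs produced by
# x[..., 0] above. With stride 1 and kernel_size 4, H_out = (H - 1) * 1 + 4 = 7,
# so the stacked real/imaginary output for the (4, 4, 4, 4) sample is (4, 7, 7, 2).
def check_complex_tconv_shape():
    module = ComplexConvTranspose2d(**get_init_inputs()[1])
    out = module(get_inputs()[0])
    assert out.shape == (4, 7, 7, 2)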
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 392
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x2 = xindex // 98
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + x2, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr2 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp9 = tl.load(in_ptr3 + x2, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tmp7 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp17 = tl.load(in_ptr4 + x3, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + x2, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr5 + x3, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + x2, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + x4, tmp26, xmask)
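# Reference sketch (annotation, not generated code) of what
# triton_poi_fused_stack_0 computes: even output slots (x0 == 0) hold the real
# part (buf0 + b_re) - (buf1 + b_im), odd slots the imaginary part
# (buf2 + b_re) + (buf3 + b_im); x3 walks the flattened (4, 7, 7) activation
# maps while x2 selects the channel for the bias broadcast.
def stack_kernel_reference(buf0, b_re, buf1, b_im, buf2, buf3):
    # buf* have shape (1, 4, 7, 7); biases have shape (4,)
    real = (buf0 + b_re.view(1, -1, 1, 1)) - (buf1 + b_im.view(1, -1, 1, 1))
    imag = (buf2 + b_re.view(1, -1, 1, 1)) + (buf3 + b_im.view(1, -1, 1, 1))
    return torch.stack((real[0], imag[0]), dim=-1)  # -> (4, 7, 7, 2)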
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 0), primals_2, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 7, 7), (196, 49, 7, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 1), primals_4, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 7, 7), (196, 49, 7, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 1), primals_2, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 7, 7), (196, 49, 7, 1))
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 0), primals_4, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 7, 7), (196, 49, 7, 1))
buf4 = empty_strided_cuda((4, 7, 7, 2), (98, 14, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(392)](buf0, primals_3, buf1,
primals_5, buf2, buf3, buf4, 392, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del buf2
del buf3
del primals_3
del primals_5
return buf4, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4,
4, 4), (256, 64, 16, 4), 0), reinterpret_tensor(primals_1, (1, 4, 4,
4), (256, 64, 16, 4), 1)
class ComplexConvTranspose2dNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, output_padding=0, dilation=1, groups=1, bias=True, **kwargs
):
super().__init__()
self.tconv_re = nn.ConvTranspose2d(in_channel, out_channel,
kernel_size=kernel_size, stride=stride, padding=padding,
output_padding=output_padding, groups=groups, bias=bias,
dilation=dilation, **kwargs)
self.tconv_im = nn.ConvTranspose2d(in_channel, out_channel,
kernel_size=kernel_size, stride=stride, padding=padding,
output_padding=output_padding, groups=groups, bias=bias,
dilation=dilation, **kwargs)
def forward(self, input_0):
        # call() consumes primals_1 as the complex input tensor and
        # (primals_2, primals_3) / (primals_4, primals_5) as the real- and
        # imaginary-branch (weight, bias) pairs.
        primals_1 = input_0
        primals_2 = self.tconv_re.weight
        primals_3 = self.tconv_re.bias
        primals_4 = self.tconv_im.weight
        primals_5 = self.tconv_im.bias
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
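# Optional usage sketch (my addition; requires a CUDA device, which the
# compiled call() path assumes): run the compiled wrapper on a contiguous
# sample input and check the stacked real/imaginary output shape.
def run_compiled_complex_tconv():
    module = ComplexConvTranspose2dNew(in_channel=4, out_channel=4,
        kernel_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = module(x)
    assert out.shape == (4, 7, 7, 2)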
| IIP-Sogang/Audio-Visual-Speech-Recognition | ComplexConvTranspose2d | false | 17,426 | [
"MIT"
] | 9 | bd03be91135acbc6162b83092d462b7fe71dd007 | https://github.com/IIP-Sogang/Audio-Visual-Speech-Recognition/tree/bd03be91135acbc6162b83092d462b7fe71dd007 |
FeatureDiscriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/2v/c2v5gg5fg2rvvqv3h6jzthw6zei7yegjmyq7qihij4ujfy4nflxr.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (2097152*y1)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ai/caiuut4x546eijscxxcy3y4fz6imb6acs5u72az6ktea3bn65pyb.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/nq/cnqmlefon7wec55zyhfgxzpm3nucv4exbxlloiy5grlzjrdppadt.py
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# out_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qd/cqdb5zqy7vwtuceg5qrttnn6o2xtmblzvitisfwcl3ywa4qwmkgf.py
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# out_2 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.01), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3g/c3gnukkxdb7eatke7hi27abp666xqcjdcgy4rg3l6cfsyenwlyhf.py
# Topologically Sorted Source Nodes: [conv2d_3, out_3], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# out_3 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.01), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ng/cngtvb5oxtkb56vanucsj6tblery6az5vtxy7la4runjhlxmbmd2.py
# Topologically Sorted Source Nodes: [conv2d_4, out_4], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# out_4 => sigmoid
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_sigmoid_5 = async_compile.triton('triton_poi_fused_convolution_sigmoid_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, None)
''', device_str='cuda')
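# Annotation (not generated code): the kernel above folds the scalar conv5
# bias into the activation buffer in place and applies the logistic sigmoid,
# i.e. out = torch.sigmoid(conv_out + bias) with the single bias element
# broadcast across all 16384 values.
def conv_sigmoid_reference(conv_out, bias):
    # conv_out: (4, 1, 64, 64); bias: (1,)
    return torch.sigmoid(conv_out + bias)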
# kernel path: runs/run_shard_2/inductor_cache/qi/cqinlenua24sluwfboabfi2gv2tf43gvrzqstib5n5zxoqid4sps.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# out_5 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%sigmoid, [1, 25], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_6 = async_compile.triton('triton_poi_fused_avg_pool2d_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 25, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 10240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 40
x1 = (xindex // 40)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), None)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (64*x1)), None)
tmp3 = tl.load(in_ptr0 + (2 + x0 + (64*x1)), None)
tmp5 = tl.load(in_ptr0 + (3 + x0 + (64*x1)), None)
tmp7 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), None)
tmp9 = tl.load(in_ptr0 + (5 + x0 + (64*x1)), None)
tmp11 = tl.load(in_ptr0 + (6 + x0 + (64*x1)), None)
tmp13 = tl.load(in_ptr0 + (7 + x0 + (64*x1)), None)
tmp15 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), None)
tmp17 = tl.load(in_ptr0 + (9 + x0 + (64*x1)), None)
tmp19 = tl.load(in_ptr0 + (10 + x0 + (64*x1)), None)
tmp21 = tl.load(in_ptr0 + (11 + x0 + (64*x1)), None)
tmp23 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), None)
tmp25 = tl.load(in_ptr0 + (13 + x0 + (64*x1)), None)
tmp27 = tl.load(in_ptr0 + (14 + x0 + (64*x1)), None)
tmp29 = tl.load(in_ptr0 + (15 + x0 + (64*x1)), None)
tmp31 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), None)
tmp33 = tl.load(in_ptr0 + (17 + x0 + (64*x1)), None)
tmp35 = tl.load(in_ptr0 + (18 + x0 + (64*x1)), None)
tmp37 = tl.load(in_ptr0 + (19 + x0 + (64*x1)), None)
tmp39 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), None)
tmp41 = tl.load(in_ptr0 + (21 + x0 + (64*x1)), None)
tmp43 = tl.load(in_ptr0 + (22 + x0 + (64*x1)), None)
tmp45 = tl.load(in_ptr0 + (23 + x0 + (64*x1)), None)
tmp47 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), None)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
tmp49 = 0.04
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + (x2), tmp50, None)
''', device_str='cuda')
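# Annotation (not generated code): the 25 unrolled loads above implement
# nn.AvgPool2d(kernel_size=(1, 25), stride=1); the 0.04 factor is 1/25 and the
# output width is 64 - 25 + 1 = 40, i.e. 4 * 64 * 40 = 10240 elements total.
def avg_pool_reference_check():
    import torch.nn as nn
    pooled = nn.AvgPool2d(kernel_size=(1, 25), stride=1)(torch.rand(4, 1, 64, 64))
    assert pooled.shape == (4, 1, 64, 40)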
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (256, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_4, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (1, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 2048, 4096, grid=grid(2048, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf2 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.bool)
buf3 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_2, buf2, buf3, 4194304, grid=grid(4194304), stream=stream0)
del buf1
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 64, 64), (524288, 1, 8192, 128))
buf5 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128), torch.bool)
buf6 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf4, primals_5, buf5, buf6, 2097152, grid=grid(2097152), stream=stream0)
del buf4
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf8 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool)
buf9 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_3.run(buf7, primals_7, buf8, buf9, 1048576, grid=grid(1048576), stream=stream0)
del buf7
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf11 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32), torch.bool)
buf12 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, out_3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf10, primals_9, buf11, buf12, 524288, grid=grid(524288), stream=stream0)
del buf10
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 1, 64, 64), (4096, 1, 64, 1))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, out_4], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_5.run(buf14, primals_11, 16384, grid=grid(16384), stream=stream0)
del primals_11
buf15 = empty_strided_cuda((4, 1, 64, 40), (2560, 2560, 40, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_6.run(buf14, buf15, 10240, grid=grid(10240), stream=stream0)
return (reinterpret_tensor(buf15, (10240, ), (1, ), 0), primals_1, buf0, primals_4, primals_6, primals_8, primals_10, buf2, buf3, buf5, buf6, buf8, buf9, buf11, buf12, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class FeatureDiscriminator(nn.Module):
def __init__(self):
super(FeatureDiscriminator, self).__init__()
self.conv1 = nn.Conv2d(in_channels=512, out_channels=256,
kernel_size=1, stride=1)
self.conv2 = nn.Conv2d(in_channels=256, out_channels=128,
kernel_size=1, stride=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=1, stride=1)
self.conv4 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size
=1, stride=1)
self.conv5 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=
1, stride=1)
self.sig5 = nn.Sigmoid()
self.avg_pool = nn.AvgPool2d(kernel_size=(1, 25), stride=1)
self._do_initializer()
def _do_initializer(self):
for module in self.modules():
if isinstance(module, nn.Conv2d):
nn.init.normal_(tensor=module.weight, mean=0, std=0.01)
def forward(self, inputs):
out = F.leaky_relu(self.conv1(inputs))
out = F.leaky_relu(self.conv2(out))
out = F.leaky_relu(self.conv3(out))
out = F.leaky_relu(self.conv4(out))
out = self.sig5(self.conv5(out))
out = self.avg_pool(out)
out = out.view(-1)
return out
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
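# Minimal shape sketch (added for illustration): every conv is 1x1/stride-1,
# so only the channel count changes (512 -> 256 -> 128 -> 64 -> 32 -> 1) while
# the 64x64 map is preserved; AvgPool2d((1, 25), stride=1) then shrinks the
# width to 64 - 25 + 1 = 40, and view(-1) flattens 4 * 1 * 64 * 40 = 10240
# scores.
def check_feature_discriminator_shape():
    out = FeatureDiscriminator()(get_inputs()[0])
    assert out.shape == (10240,)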
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, None)
@triton.jit
def triton_poi_fused_avg_pool2d_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 40
x1 = xindex // 40
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), None)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 64 * x1), None)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 64 * x1), None)
tmp5 = tl.load(in_ptr0 + (3 + x0 + 64 * x1), None)
tmp7 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), None)
tmp9 = tl.load(in_ptr0 + (5 + x0 + 64 * x1), None)
tmp11 = tl.load(in_ptr0 + (6 + x0 + 64 * x1), None)
tmp13 = tl.load(in_ptr0 + (7 + x0 + 64 * x1), None)
tmp15 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), None)
tmp17 = tl.load(in_ptr0 + (9 + x0 + 64 * x1), None)
tmp19 = tl.load(in_ptr0 + (10 + x0 + 64 * x1), None)
tmp21 = tl.load(in_ptr0 + (11 + x0 + 64 * x1), None)
tmp23 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), None)
tmp25 = tl.load(in_ptr0 + (13 + x0 + 64 * x1), None)
tmp27 = tl.load(in_ptr0 + (14 + x0 + 64 * x1), None)
tmp29 = tl.load(in_ptr0 + (15 + x0 + 64 * x1), None)
tmp31 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), None)
tmp33 = tl.load(in_ptr0 + (17 + x0 + 64 * x1), None)
tmp35 = tl.load(in_ptr0 + (18 + x0 + 64 * x1), None)
tmp37 = tl.load(in_ptr0 + (19 + x0 + 64 * x1), None)
tmp39 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), None)
tmp41 = tl.load(in_ptr0 + (21 + x0 + 64 * x1), None)
tmp43 = tl.load(in_ptr0 + (22 + x0 + 64 * x1), None)
tmp45 = tl.load(in_ptr0 + (23 + x0 + 64 * x1), None)
tmp47 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), None)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
tmp49 = 0.04
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + x2, tmp50, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_4, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (1, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf2 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256
), torch.bool)
buf3 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256
), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(4194304)](buf1,
primals_2, buf2, buf3, 4194304, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf1
del primals_2
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 64, 64), (524288, 1, 8192, 128))
buf5 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128),
torch.bool)
buf6 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128),
torch.float32)
triton_poi_fused_convolution_leaky_relu_2[grid(2097152)](buf4,
primals_5, buf5, buf6, 2097152, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf4
del primals_5
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf8 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.bool)
buf9 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.float32)
triton_poi_fused_convolution_leaky_relu_3[grid(1048576)](buf7,
primals_7, buf8, buf9, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf7
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf11 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32),
torch.bool)
buf12 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32),
torch.float32)
triton_poi_fused_convolution_leaky_relu_4[grid(524288)](buf10,
primals_9, buf11, buf12, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf10
del primals_9
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 1, 64, 64), (4096, 1, 64, 1))
buf14 = buf13
del buf13
triton_poi_fused_convolution_sigmoid_5[grid(16384)](buf14,
primals_11, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf15 = empty_strided_cuda((4, 1, 64, 40), (2560, 2560, 40, 1),
torch.float32)
triton_poi_fused_avg_pool2d_6[grid(10240)](buf14, buf15, 10240,
XBLOCK=128, num_warps=4, num_stages=1)
return (reinterpret_tensor(buf15, (10240,), (1,), 0), primals_1, buf0,
primals_4, primals_6, primals_8, primals_10, buf2, buf3, buf5, buf6,
buf8, buf9, buf11, buf12, buf14)
class FeatureDiscriminatorNew(nn.Module):
def __init__(self):
super(FeatureDiscriminatorNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=512, out_channels=256,
kernel_size=1, stride=1)
self.conv2 = nn.Conv2d(in_channels=256, out_channels=128,
kernel_size=1, stride=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=1, stride=1)
self.conv4 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size
=1, stride=1)
self.conv5 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=
1, stride=1)
self.sig5 = nn.Sigmoid()
self.avg_pool = nn.AvgPool2d(kernel_size=(1, 25), stride=1)
self._do_initializer()
def _do_initializer(self):
for module in self.modules():
if isinstance(module, nn.Conv2d):
nn.init.normal_(tensor=module.weight, mean=0, std=0.01)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| HotaekHan/Synthetically_Supervised_Text_Recognition | FeatureDiscriminator | false | 17,427 | [
"MIT"
] | 8 | a6bb7d3039b1280c6efe177b69d8b985d2e13285 | https://github.com/HotaekHan/Synthetically_Supervised_Text_Recognition/tree/a6bb7d3039b1280c6efe177b69d8b985d2e13285 |
ActorNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4g/c4guhk7x6skkidedvs2gxz2kcu6gb76l3ig5crjjvjtzvnjlhlte.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
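# Annotation (not generated code): the kernel fuses the bias add with ReLU and
# also stores the boolean mask (out <= 0) that aten.threshold_backward later
# uses to zero the incoming gradient. A plain PyTorch equivalent:
def relu_threshold_reference(x, bias):
    out = torch.relu(x + bias)
    return out, out <= 0.0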
# kernel path: runs/run_shard_2/inductor_cache/im/cimjqfyrnbi2v4eg3hbj3b2rzjznianhv7isimhj2shrdgg2krgd.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_2 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5q/c5qb2e5cpnbkrjybp3njglebr6zp3r6vnzesbgrfkpsqim5oq7ks.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_2 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ActorNetwork(nn.Module):
def __init__(self, input_size, hidden_size, action_size):
super(ActorNetwork, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, action_size)
def forward(self, x):
out = F.relu(self.fc1(x))
out = F.relu(self.fc2(out))
        out = F.log_softmax(self.fc3(out), dim=1)  # explicit dim avoids the deprecated implicit-dim warning; dim=1 is what PyTorch infers for 4D input and what the fused kernels compute
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'action_size': 4}]
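# Annotation (not part of the original repo): a minimal eager-mode sketch of
# the numerically stable decomposition that the fused `_log_softmax` kernels
# in this entry implement: log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x)))).
def reference_log_softmax(x, dim=1):
    shifted = x - x.max(dim=dim, keepdim=True).values  # first kernel: subtract the per-row max
    return shifted - shifted.exp().sum(dim=dim, keepdim=True).log()  # second kernel: subtract log-sum-exp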
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
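# Annotation (not generated code): this kernel fuses the linear layer's bias
# add with ReLU and also stores the boolean (activation <= 0) mask that
# autograd's threshold_backward reuses to zero gradients, so the activation
# never has to be recomputed in the backward pass.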
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__log_softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class ActorNetworkNew(nn.Module):
def __init__(self, input_size, hidden_size, action_size):
super(ActorNetworkNew, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
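# Usage sketch (an assumption, not from the original repo; requires a CUDA
# device): the compiled wrapper is constructed like ActorNetwork and returns
# log-probabilities whose exponentials sum to one along dim 1.
if __name__ == '__main__':
    net = ActorNetworkNew(input_size=4, hidden_size=4, action_size=4).cuda()
    log_probs = net(torch.rand(4, 4, 4, 4, device='cuda'))
    print(log_probs.exp().sum(dim=1))  # each entry should be ~1.0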
| IandRover/meta-gradient_RL | ActorNetwork | false | 17,428 | ["MIT"] | 6 | 5d2539aceb9fa68b1849feac7d37741f9e5f83a3 | https://github.com/IandRover/meta-gradient_RL/tree/5d2539aceb9fa68b1849feac7d37741f9e5f83a3 |
CircleLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/mg/cmgr44khe2kcw23632to5qbyhb35zulqpneqemao3ohughblslmt.py
# Topologically Sorted Source Nodes: [sub, alpha_p, mul, sub_1, logit_p, logsumexp, add, alpha_n, mul_2, sub_2, logit_n, logsumexp_1, add_1, softplus], Original ATen: [aten.rsub, aten.clamp_min, aten.mul, aten.sub, aten.logsumexp, aten.add, aten.softplus]
# Source node to ATen node mapping:
# add => add
# add_1 => add_3
# alpha_n => clamp_min_1
# alpha_p => clamp_min
# logit_n => mul_3
# logit_p => mul_1
# logsumexp => abs_1, add_1, amax, eq, exp, full_default, log, sub_3, sum_1, where
# logsumexp_1 => abs_2, add_2, amax_1, eq_1, exp_1, full_default_1, log_1, sub_4, sum_2, where_1
# mul => mul
# mul_2 => mul_2
# softplus => exp_2, gt, log1p, where_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (5, %arg0_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min, -4), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, -3), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sub_1), kwargs = {})
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%mul_1, [0], True), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_1, inf), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %amax), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %where), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0]), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %squeeze), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 4), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min_1, 4), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 4), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %sub_2), kwargs = {})
# %amax_1 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%mul_3, [0], True), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_1,), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_2, inf), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %amax_1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %where_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [0]), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_1, %squeeze_1), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %add_2), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_3, 20), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%add_3,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_3, %log1p), kwargs = {})
triton_poi_fused_add_clamp_min_logsumexp_mul_rsub_softplus_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_min_logsumexp_mul_rsub_softplus_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_min_logsumexp_mul_rsub_softplus_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_min_logsumexp_mul_rsub_softplus_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp10 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp24 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp46 = tl.load(in_ptr1 + (x0), xmask)
tmp53 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp60 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp67 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 5.0
tmp2 = tmp1 - tmp0
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = -4.0
tmp6 = tmp4 * tmp5
tmp7 = -3.0
tmp8 = tmp0 - tmp7
tmp9 = tmp6 * tmp8
tmp11 = tmp1 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp3)
tmp13 = tmp12 * tmp5
tmp14 = tmp10 - tmp7
tmp15 = tmp13 * tmp14
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp18 = tmp1 - tmp17
tmp19 = triton_helpers.maximum(tmp18, tmp3)
tmp20 = tmp19 * tmp5
tmp21 = tmp17 - tmp7
tmp22 = tmp20 * tmp21
tmp23 = triton_helpers.maximum(tmp16, tmp22)
tmp25 = tmp1 - tmp24
tmp26 = triton_helpers.maximum(tmp25, tmp3)
tmp27 = tmp26 * tmp5
tmp28 = tmp24 - tmp7
tmp29 = tmp27 * tmp28
tmp30 = triton_helpers.maximum(tmp23, tmp29)
tmp31 = tl_math.abs(tmp30)
tmp32 = float("inf")
tmp33 = tmp31 == tmp32
tmp34 = tl.where(tmp33, tmp3, tmp30)
tmp35 = tmp9 - tmp34
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp15 - tmp34
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp40 = tmp22 - tmp34
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp39 + tmp41
tmp43 = tmp29 - tmp34
tmp44 = tl_math.exp(tmp43)
tmp45 = tmp42 + tmp44
tmp47 = 4.0
tmp48 = tmp46 + tmp47
tmp49 = triton_helpers.maximum(tmp48, tmp3)
tmp50 = tmp49 * tmp47
tmp51 = tmp46 - tmp47
tmp52 = tmp50 * tmp51
tmp54 = tmp53 + tmp47
tmp55 = triton_helpers.maximum(tmp54, tmp3)
tmp56 = tmp55 * tmp47
tmp57 = tmp53 - tmp47
tmp58 = tmp56 * tmp57
tmp59 = triton_helpers.maximum(tmp52, tmp58)
tmp61 = tmp60 + tmp47
tmp62 = triton_helpers.maximum(tmp61, tmp3)
tmp63 = tmp62 * tmp47
tmp64 = tmp60 - tmp47
tmp65 = tmp63 * tmp64
tmp66 = triton_helpers.maximum(tmp59, tmp65)
tmp68 = tmp67 + tmp47
tmp69 = triton_helpers.maximum(tmp68, tmp3)
tmp70 = tmp69 * tmp47
tmp71 = tmp67 - tmp47
tmp72 = tmp70 * tmp71
tmp73 = triton_helpers.maximum(tmp66, tmp72)
tmp74 = tl_math.abs(tmp73)
tmp75 = tmp74 == tmp32
tmp76 = tl.where(tmp75, tmp3, tmp73)
tmp77 = tmp52 - tmp76
tmp78 = tl_math.exp(tmp77)
tmp79 = tmp58 - tmp76
tmp80 = tl_math.exp(tmp79)
tmp81 = tmp78 + tmp80
tmp82 = tmp65 - tmp76
tmp83 = tl_math.exp(tmp82)
tmp84 = tmp81 + tmp83
tmp85 = tmp72 - tmp76
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp84 + tmp86
tmp88 = tl_math.log(tmp45)
tmp89 = tmp88 + tmp34
tmp90 = tl_math.log(tmp87)
tmp91 = tmp90 + tmp76
tmp92 = tmp89 + tmp91
tmp93 = 20.0
tmp94 = tmp92 > tmp93
tmp95 = tl_math.exp(tmp92)
tmp96 = libdevice.log1p(tmp95)
tmp97 = tl.where(tmp94, tmp92, tmp96)
tl.store(in_out_ptr0 + (x0), tmp97, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, alpha_p, mul, sub_1, logit_p, logsumexp, add, alpha_n, mul_2, sub_2, logit_n, logsumexp_1, add_1, softplus], Original ATen: [aten.rsub, aten.clamp_min, aten.mul, aten.sub, aten.logsumexp, aten.add, aten.softplus]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_min_logsumexp_mul_rsub_softplus_sub_0.run(buf5, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import *
import torch.nn as nn
import torch.nn.functional as F
class CircleLoss(nn.Module):
def __init__(self, gamma, m):
super().__init__()
self.gamma = gamma
self.m = m
def forward(self, s_p, s_n):
alpha_p = torch.clamp_min(1 + self.m - s_p, 0)
alpha_n = torch.clamp_min(self.m + s_n, 0)
delta_p = 1 - self.m
delta_n = self.m
logit_p = -self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
        return F.softplus(torch.logsumexp(logit_p, dim=0) +
                          torch.logsumexp(logit_n, dim=0))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'gamma': 4, 'm': 4}]
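# Annotation (not part of the original repo): with gamma=4 and m=4 the fused
# kernel's float literals map onto the Circle Loss terms as
#   1 + m = 5.0, -gamma = -4.0, delta_p = 1 - m = -3.0,
#   gamma = 4.0, delta_n = m = 4.0,
# so a single pass evaluates softplus(logsumexp(logit_p, 0) + logsumexp(logit_n, 0)),
# including the max-subtraction (with an inf guard) that keeps logsumexp stable.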
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import *
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_min_logsumexp_mul_rsub_softplus_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp10 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp24 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp46 = tl.load(in_ptr1 + x0, xmask)
tmp53 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp60 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp67 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 5.0
tmp2 = tmp1 - tmp0
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = -4.0
tmp6 = tmp4 * tmp5
tmp7 = -3.0
tmp8 = tmp0 - tmp7
tmp9 = tmp6 * tmp8
tmp11 = tmp1 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp3)
tmp13 = tmp12 * tmp5
tmp14 = tmp10 - tmp7
tmp15 = tmp13 * tmp14
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp18 = tmp1 - tmp17
tmp19 = triton_helpers.maximum(tmp18, tmp3)
tmp20 = tmp19 * tmp5
tmp21 = tmp17 - tmp7
tmp22 = tmp20 * tmp21
tmp23 = triton_helpers.maximum(tmp16, tmp22)
tmp25 = tmp1 - tmp24
tmp26 = triton_helpers.maximum(tmp25, tmp3)
tmp27 = tmp26 * tmp5
tmp28 = tmp24 - tmp7
tmp29 = tmp27 * tmp28
tmp30 = triton_helpers.maximum(tmp23, tmp29)
tmp31 = tl_math.abs(tmp30)
tmp32 = float('inf')
tmp33 = tmp31 == tmp32
tmp34 = tl.where(tmp33, tmp3, tmp30)
tmp35 = tmp9 - tmp34
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp15 - tmp34
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp40 = tmp22 - tmp34
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp39 + tmp41
tmp43 = tmp29 - tmp34
tmp44 = tl_math.exp(tmp43)
tmp45 = tmp42 + tmp44
tmp47 = 4.0
tmp48 = tmp46 + tmp47
tmp49 = triton_helpers.maximum(tmp48, tmp3)
tmp50 = tmp49 * tmp47
tmp51 = tmp46 - tmp47
tmp52 = tmp50 * tmp51
tmp54 = tmp53 + tmp47
tmp55 = triton_helpers.maximum(tmp54, tmp3)
tmp56 = tmp55 * tmp47
tmp57 = tmp53 - tmp47
tmp58 = tmp56 * tmp57
tmp59 = triton_helpers.maximum(tmp52, tmp58)
tmp61 = tmp60 + tmp47
tmp62 = triton_helpers.maximum(tmp61, tmp3)
tmp63 = tmp62 * tmp47
tmp64 = tmp60 - tmp47
tmp65 = tmp63 * tmp64
tmp66 = triton_helpers.maximum(tmp59, tmp65)
tmp68 = tmp67 + tmp47
tmp69 = triton_helpers.maximum(tmp68, tmp3)
tmp70 = tmp69 * tmp47
tmp71 = tmp67 - tmp47
tmp72 = tmp70 * tmp71
tmp73 = triton_helpers.maximum(tmp66, tmp72)
tmp74 = tl_math.abs(tmp73)
tmp75 = tmp74 == tmp32
tmp76 = tl.where(tmp75, tmp3, tmp73)
tmp77 = tmp52 - tmp76
tmp78 = tl_math.exp(tmp77)
tmp79 = tmp58 - tmp76
tmp80 = tl_math.exp(tmp79)
tmp81 = tmp78 + tmp80
tmp82 = tmp65 - tmp76
tmp83 = tl_math.exp(tmp82)
tmp84 = tmp81 + tmp83
tmp85 = tmp72 - tmp76
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp84 + tmp86
tmp88 = tl_math.log(tmp45)
tmp89 = tmp88 + tmp34
tmp90 = tl_math.log(tmp87)
tmp91 = tmp90 + tmp76
tmp92 = tmp89 + tmp91
tmp93 = 20.0
tmp94 = tmp92 > tmp93
tmp95 = tl_math.exp(tmp92)
tmp96 = libdevice.log1p(tmp95)
tmp97 = tl.where(tmp94, tmp92, tmp96)
tl.store(in_out_ptr0 + x0, tmp97, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_clamp_min_logsumexp_mul_rsub_softplus_sub_0[grid
(64)](buf5, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del arg0_1
del arg1_1
return buf5,
class CircleLossNew(nn.Module):
def __init__(self, gamma, m):
super().__init__()
self.gamma = gamma
self.m = m
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
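# Usage sketch (an assumption, not from the original repo; requires CUDA).
# Note that gamma and m were baked into the kernel at trace time (both 4
# here), so the constructor arguments of the compiled wrapper have no effect:
if __name__ == '__main__':
    s_p = torch.rand(4, 4, 4, 4, device='cuda')
    s_n = torch.rand(4, 4, 4, 4, device='cuda')
    loss = CircleLossNew(gamma=4, m=4)(s_p, s_n)
    print(loss.shape)  # torch.Size([4, 4, 4]): logsumexp reduced dim 0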
| IntelLabs/MICSAS | CircleLoss | false | 17,429 | ["MIT", "BSD-3-Clause"] | 7 | 4124991a683cc10004e403f3f3eb442f58616519 | https://github.com/IntelLabs/MICSAS/tree/4124991a683cc10004e403f3f3eb442f58616519 |
GeneralRelu | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/2r/c2rkj22ztom44dtfrpj2uok5zc76tpegorlicksd2vm2decu7zzh.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
from typing import *
class GeneralRelu(nn.Module):
def __init__(self, leak=None, sub=None, maxv=None):
super().__init__()
self.leak, self.sub, self.maxv = leak, sub, maxv
def forward(self, x):
x = F.leaky_relu(x, self.leak) if self.leak is not None else F.relu(x)
if self.sub is not None:
x.sub_(self.sub)
if self.maxv is not None:
x.clamp_max_(self.maxv)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GeneralReluNew(nn.Module):
def __init__(self, leak=None, sub=None, maxv=None):
super().__init__()
self.leak, self.sub, self.maxv = leak, sub, maxv
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
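# Usage sketch (an assumption, not from the original repo; requires CUDA):
# the module was traced with leak=sub=maxv=None, so the compiled kernel is a
# plain ReLU; any other configuration would need a fresh trace.
if __name__ == '__main__':
    m = GeneralReluNew().cuda()
    x = torch.randn(4, 4, 4, 4, device='cuda')
    assert torch.equal(m(x), torch.relu(x))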
| ImadDabbura/fastai-courses | GeneralRelu | false | 17,430 | ["Apache-2.0"] | 3 | 053637a2dd3b4ad6c35f97a13f3fba87af1d3940 | https://github.com/ImadDabbura/fastai-courses/tree/053637a2dd3b4ad6c35f97a13f3fba87af1d3940 |
NetVLAD | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/u3/cu37kuzlgpwonwye6xqtjvxt5kv252foqxbw5zhbogqli35uxeev.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# x => pow_1, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
triton_red_fused_linalg_vector_norm_0 = async_compile.triton('triton_red_fused_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[16384, 128],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 16384
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x1 = (xindex // 4096)
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (524288*x1)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
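# Annotation (not generated code): the reduction above accumulates the sum of
# squares over the 128 channels; the next kernel divides each element by
# max(sqrt(sum), 1e-12), i.e. F.normalize(x, p=2, dim=1) with its default eps,
# which is NetVLAD's intra-normalization step.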
# kernel path: runs/run_shard_2/inductor_cache/wq/cwq7qfgyxqxicmg3fonn5xsvt5ynmcr7p2tzqkxrtriri6m6lps7.py
# Topologically Sorted Source Nodes: [x, residual_2, residual_4, residual_6, residual_8, residual_10, residual_12, residual_14, residual_16, residual_18, residual_20, residual_22, residual_24, residual_26, residual_28, residual_30, residual_32, residual_34, residual_36, residual_38, residual_40, residual_42, residual_44, residual_46, residual_48, residual_50, residual_52, residual_54, residual_56, residual_58, residual_60, residual_62, residual_64, residual_66, residual_68, residual_70, residual_72, residual_74, residual_76, residual_78, residual_80, residual_82, residual_84, residual_86, residual_88, residual_90, residual_92, residual_94, residual_96, residual_98, residual_100, residual_102, residual_104, residual_106, residual_108, residual_110, residual_112, residual_114, residual_116, residual_118, residual_120, residual_122, residual_124, residual_126], Original ATen: [aten.div, aten.sub]
# Source node to ATen node mapping:
# residual_10 => sub_6
# residual_100 => sub_51
# residual_102 => sub_52
# residual_104 => sub_53
# residual_106 => sub_54
# residual_108 => sub_55
# residual_110 => sub_56
# residual_112 => sub_57
# residual_114 => sub_58
# residual_116 => sub_59
# residual_118 => sub_60
# residual_12 => sub_7
# residual_120 => sub_61
# residual_122 => sub_62
# residual_124 => sub_63
# residual_126 => sub_64
# residual_14 => sub_8
# residual_16 => sub_9
# residual_18 => sub_10
# residual_2 => sub_2
# residual_20 => sub_11
# residual_22 => sub_12
# residual_24 => sub_13
# residual_26 => sub_14
# residual_28 => sub_15
# residual_30 => sub_16
# residual_32 => sub_17
# residual_34 => sub_18
# residual_36 => sub_19
# residual_38 => sub_20
# residual_4 => sub_3
# residual_40 => sub_21
# residual_42 => sub_22
# residual_44 => sub_23
# residual_46 => sub_24
# residual_48 => sub_25
# residual_50 => sub_26
# residual_52 => sub_27
# residual_54 => sub_28
# residual_56 => sub_29
# residual_58 => sub_30
# residual_6 => sub_4
# residual_60 => sub_31
# residual_62 => sub_32
# residual_64 => sub_33
# residual_66 => sub_34
# residual_68 => sub_35
# residual_70 => sub_36
# residual_72 => sub_37
# residual_74 => sub_38
# residual_76 => sub_39
# residual_78 => sub_40
# residual_8 => sub_5
# residual_80 => sub_41
# residual_82 => sub_42
# residual_84 => sub_43
# residual_86 => sub_44
# residual_88 => sub_45
# residual_90 => sub_46
# residual_92 => sub_47
# residual_94 => sub_48
# residual_96 => sub_49
# residual_98 => sub_50
# x => div
# Graph fragment:
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_4), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_7), kwargs = {})
# %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_10), kwargs = {})
# %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_13), kwargs = {})
# %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_16), kwargs = {})
# %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_19), kwargs = {})
# %sub_8 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_22), kwargs = {})
# %sub_9 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_25), kwargs = {})
# %sub_10 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_28), kwargs = {})
# %sub_11 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_31), kwargs = {})
# %sub_12 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_34), kwargs = {})
# %sub_13 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_37), kwargs = {})
# %sub_14 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_40), kwargs = {})
# %sub_15 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_43), kwargs = {})
# %sub_16 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_46), kwargs = {})
# %sub_17 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_49), kwargs = {})
# %sub_18 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_52), kwargs = {})
# %sub_19 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_55), kwargs = {})
# %sub_20 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_58), kwargs = {})
# %sub_21 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_61), kwargs = {})
# %sub_22 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_64), kwargs = {})
# %sub_23 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_67), kwargs = {})
# %sub_24 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_70), kwargs = {})
# %sub_25 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_73), kwargs = {})
# %sub_26 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_76), kwargs = {})
# %sub_27 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_79), kwargs = {})
# %sub_28 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_82), kwargs = {})
# %sub_29 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_85), kwargs = {})
# %sub_30 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_88), kwargs = {})
# %sub_31 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_91), kwargs = {})
# %sub_32 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_94), kwargs = {})
# %sub_33 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_97), kwargs = {})
# %sub_34 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_100), kwargs = {})
# %sub_35 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_103), kwargs = {})
# %sub_36 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_106), kwargs = {})
# %sub_37 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_109), kwargs = {})
# %sub_38 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_112), kwargs = {})
# %sub_39 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_115), kwargs = {})
# %sub_40 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_118), kwargs = {})
# %sub_41 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_121), kwargs = {})
# %sub_42 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_124), kwargs = {})
# %sub_43 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_127), kwargs = {})
# %sub_44 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_130), kwargs = {})
# %sub_45 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_133), kwargs = {})
# %sub_46 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_136), kwargs = {})
# %sub_47 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_139), kwargs = {})
# %sub_48 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_142), kwargs = {})
# %sub_49 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_145), kwargs = {})
# %sub_50 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_148), kwargs = {})
# %sub_51 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_151), kwargs = {})
# %sub_52 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_154), kwargs = {})
# %sub_53 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_157), kwargs = {})
# %sub_54 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_160), kwargs = {})
# %sub_55 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_163), kwargs = {})
# %sub_56 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_166), kwargs = {})
# %sub_57 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_169), kwargs = {})
# %sub_58 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_172), kwargs = {})
# %sub_59 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_175), kwargs = {})
# %sub_60 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_178), kwargs = {})
# %sub_61 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_181), kwargs = {})
# %sub_62 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_184), kwargs = {})
# %sub_63 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_187), kwargs = {})
# %sub_64 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_190), kwargs = {})
triton_poi_fused_div_sub_1 = async_compile.triton('triton_poi_fused_div_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: '*fp32', 67: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 65, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61, out_ptr62, out_ptr63, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 524288)
x1 = (xindex // 4096) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (1792 + x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last')
tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last')
tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last')
tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last')
tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last')
tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last')
tmp84 = tl.load(in_ptr2 + (5120 + x1), None, eviction_policy='evict_last')
tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last')
tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last')
tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last')
tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last')
tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last')
tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last')
tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last')
tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last')
tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last')
tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last')
tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last')
tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last')
tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last')
tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last')
tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last')
tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last')
tmp126 = tl.load(in_ptr2 + (7808 + x1), None, eviction_policy='evict_last')
tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last')
tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last')
    tmp2 = libdevice.sqrt(tmp1)
    tmp3 = 1e-12
    tmp4 = triton_helpers.maximum(tmp2, tmp3)  # clamp the norm away from zero
    tmp5 = tmp0 / tmp4  # L2-normalize the descriptor (F.normalize semantics, eps=1e-12)
tmp7 = tmp5 - tmp6
tmp9 = tmp5 - tmp8
tmp11 = tmp5 - tmp10
tmp13 = tmp5 - tmp12
tmp15 = tmp5 - tmp14
tmp17 = tmp5 - tmp16
tmp19 = tmp5 - tmp18
tmp21 = tmp5 - tmp20
tmp23 = tmp5 - tmp22
tmp25 = tmp5 - tmp24
tmp27 = tmp5 - tmp26
tmp29 = tmp5 - tmp28
tmp31 = tmp5 - tmp30
tmp33 = tmp5 - tmp32
tmp35 = tmp5 - tmp34
tmp37 = tmp5 - tmp36
tmp39 = tmp5 - tmp38
tmp41 = tmp5 - tmp40
tmp43 = tmp5 - tmp42
tmp45 = tmp5 - tmp44
tmp47 = tmp5 - tmp46
tmp49 = tmp5 - tmp48
tmp51 = tmp5 - tmp50
tmp53 = tmp5 - tmp52
tmp55 = tmp5 - tmp54
tmp57 = tmp5 - tmp56
tmp59 = tmp5 - tmp58
tmp61 = tmp5 - tmp60
tmp63 = tmp5 - tmp62
tmp65 = tmp5 - tmp64
tmp67 = tmp5 - tmp66
tmp69 = tmp5 - tmp68
tmp71 = tmp5 - tmp70
tmp73 = tmp5 - tmp72
tmp75 = tmp5 - tmp74
tmp77 = tmp5 - tmp76
tmp79 = tmp5 - tmp78
tmp81 = tmp5 - tmp80
tmp83 = tmp5 - tmp82
tmp85 = tmp5 - tmp84
tmp87 = tmp5 - tmp86
tmp89 = tmp5 - tmp88
tmp91 = tmp5 - tmp90
tmp93 = tmp5 - tmp92
tmp95 = tmp5 - tmp94
tmp97 = tmp5 - tmp96
tmp99 = tmp5 - tmp98
tmp101 = tmp5 - tmp100
tmp103 = tmp5 - tmp102
tmp105 = tmp5 - tmp104
tmp107 = tmp5 - tmp106
tmp109 = tmp5 - tmp108
tmp111 = tmp5 - tmp110
tmp113 = tmp5 - tmp112
tmp115 = tmp5 - tmp114
tmp117 = tmp5 - tmp116
tmp119 = tmp5 - tmp118
tmp121 = tmp5 - tmp120
tmp123 = tmp5 - tmp122
tmp125 = tmp5 - tmp124
tmp127 = tmp5 - tmp126
tmp129 = tmp5 - tmp128
tmp131 = tmp5 - tmp130
tl.store(out_ptr0 + (x3), tmp5, None)
tl.store(out_ptr1 + (x3), tmp7, None)
tl.store(out_ptr2 + (x3), tmp9, None)
tl.store(out_ptr3 + (x3), tmp11, None)
tl.store(out_ptr4 + (x3), tmp13, None)
tl.store(out_ptr5 + (x3), tmp15, None)
tl.store(out_ptr6 + (x3), tmp17, None)
tl.store(out_ptr7 + (x3), tmp19, None)
tl.store(out_ptr8 + (x3), tmp21, None)
tl.store(out_ptr9 + (x3), tmp23, None)
tl.store(out_ptr10 + (x3), tmp25, None)
tl.store(out_ptr11 + (x3), tmp27, None)
tl.store(out_ptr12 + (x3), tmp29, None)
tl.store(out_ptr13 + (x3), tmp31, None)
tl.store(out_ptr14 + (x3), tmp33, None)
tl.store(out_ptr15 + (x3), tmp35, None)
tl.store(out_ptr16 + (x3), tmp37, None)
tl.store(out_ptr17 + (x3), tmp39, None)
tl.store(out_ptr18 + (x3), tmp41, None)
tl.store(out_ptr19 + (x3), tmp43, None)
tl.store(out_ptr20 + (x3), tmp45, None)
tl.store(out_ptr21 + (x3), tmp47, None)
tl.store(out_ptr22 + (x3), tmp49, None)
tl.store(out_ptr23 + (x3), tmp51, None)
tl.store(out_ptr24 + (x3), tmp53, None)
tl.store(out_ptr25 + (x3), tmp55, None)
tl.store(out_ptr26 + (x3), tmp57, None)
tl.store(out_ptr27 + (x3), tmp59, None)
tl.store(out_ptr28 + (x3), tmp61, None)
tl.store(out_ptr29 + (x3), tmp63, None)
tl.store(out_ptr30 + (x3), tmp65, None)
tl.store(out_ptr31 + (x3), tmp67, None)
tl.store(out_ptr32 + (x3), tmp69, None)
tl.store(out_ptr33 + (x3), tmp71, None)
tl.store(out_ptr34 + (x3), tmp73, None)
tl.store(out_ptr35 + (x3), tmp75, None)
tl.store(out_ptr36 + (x3), tmp77, None)
tl.store(out_ptr37 + (x3), tmp79, None)
tl.store(out_ptr38 + (x3), tmp81, None)
tl.store(out_ptr39 + (x3), tmp83, None)
tl.store(out_ptr40 + (x3), tmp85, None)
tl.store(out_ptr41 + (x3), tmp87, None)
tl.store(out_ptr42 + (x3), tmp89, None)
tl.store(out_ptr43 + (x3), tmp91, None)
tl.store(out_ptr44 + (x3), tmp93, None)
tl.store(out_ptr45 + (x3), tmp95, None)
tl.store(out_ptr46 + (x3), tmp97, None)
tl.store(out_ptr47 + (x3), tmp99, None)
tl.store(out_ptr48 + (x3), tmp101, None)
tl.store(out_ptr49 + (x3), tmp103, None)
tl.store(out_ptr50 + (x3), tmp105, None)
tl.store(out_ptr51 + (x3), tmp107, None)
tl.store(out_ptr52 + (x3), tmp109, None)
tl.store(out_ptr53 + (x3), tmp111, None)
tl.store(out_ptr54 + (x3), tmp113, None)
tl.store(out_ptr55 + (x3), tmp115, None)
tl.store(out_ptr56 + (x3), tmp117, None)
tl.store(out_ptr57 + (x3), tmp119, None)
tl.store(out_ptr58 + (x3), tmp121, None)
tl.store(out_ptr59 + (x3), tmp123, None)
tl.store(out_ptr60 + (x3), tmp125, None)
tl.store(out_ptr61 + (x3), tmp127, None)
tl.store(out_ptr62 + (x3), tmp129, None)
tl.store(out_ptr63 + (x3), tmp131, None)
''', device_str='cuda')
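
# For reference: a minimal eager-mode sketch of the math fused in the kernel
# above -- an L2 normalization of each descriptor followed by one residual per
# cluster centroid, as in a NetVLAD-style layer. The names `x` and `centroids`
# are illustrative only (not taken from the compiled graph); the shapes assume
# 128-dim descriptors, 64 clusters, and 4096 positions, matching the offsets.
def _reference_normalize_and_residuals(x, centroids):
    # x: (N, 128, 4096) descriptors; centroids: (64, 128) cluster centers.
    norm = x.pow(2).sum(dim=1, keepdim=True).sqrt().clamp_min(1e-12)
    x_hat = x / norm  # matches tmp5 = tmp0 / max(sqrt(tmp1), 1e-12) above
    # one (N, 128, 4096) residual per centroid, mirroring the per-out_ptr stores
    return x_hat, [x_hat - c.view(1, -1, 1) for c in centroids]
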
# kernel path: runs/run_shard_2/inductor_cache/ek/cekqsaezxkuovn5rct7qatnrrzjs47ux33pgzisk7yjciustu7zp.py
# Topologically Sorted Source Nodes: [soft_assign_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# soft_assign_1 => amax, exp, sub, sum_2
# Graph fragment:
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16384, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16384
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4096
x1 = (xindex // 4096)
x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (262144*x1)), None)  # logits over all 64 clusters
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = triton_helpers.max2(tmp1, 1)[:, None]  # row max, for numerical stability
    tmp4 = tmp0 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]  # softmax denominator: sum of exp(logit - max)
tl.store(out_ptr0 + (x3), tmp3, None)
tl.store(out_ptr1 + (x3), tmp8, None)
''', device_str='cuda')
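
# For reference: an eager-mode sketch of the reduction above. For each of the
# 16384 (batch x position) rows the kernel reduces over the 64 cluster logits
# and stores the row max and the sum of exp(logit - max) -- the two statistics
# a numerically stable softmax needs -- so the later kernels can form the
# probabilities without re-reducing. Tensor names are illustrative only.
def _reference_softmax_stats(scores):
    # scores: (N, 64, 4096) soft-assignment logits, reduced over the clusters.
    amax = scores.amax(dim=1, keepdim=True)                  # tmp3
    denom = (scores - amax).exp().sum(dim=1, keepdim=True)   # tmp8
    return amax, denom
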
# kernel path: runs/run_shard_2/inductor_cache/hv/chvod4i5xws3vo3y5v4kvbqrbhpe24n7uq3izupyixha3jxeonu3.py
# Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_3, sum_2, residual_5, sum_3, residual_7, sum_4, residual_9, sum_5, residual_11, sum_6, residual_13, sum_7, residual_15, sum_8, residual_17, sum_9, residual_19, sum_10, residual_21, sum_11, residual_23, sum_12, residual_25, sum_13, residual_27, sum_14, residual_29, sum_15, residual_31, sum_16, residual_33, sum_17, residual_35, sum_18, residual_37, sum_19, residual_39, sum_20, residual_41, sum_21, residual_43, sum_22, residual_45, sum_23, residual_47, sum_24, residual_49, sum_25, residual_51, sum_26, residual_53, sum_27, residual_55, sum_28, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# residual => sub_1
# residual_1 => mul
# residual_11 => mul_5
# residual_13 => mul_6
# residual_15 => mul_7
# residual_17 => mul_8
# residual_19 => mul_9
# residual_21 => mul_10
# residual_23 => mul_11
# residual_25 => mul_12
# residual_27 => mul_13
# residual_29 => mul_14
# residual_3 => mul_1
# residual_31 => mul_15
# residual_33 => mul_16
# residual_35 => mul_17
# residual_37 => mul_18
# residual_39 => mul_19
# residual_41 => mul_20
# residual_43 => mul_21
# residual_45 => mul_22
# residual_47 => mul_23
# residual_49 => mul_24
# residual_5 => mul_2
# residual_51 => mul_25
# residual_53 => mul_26
# residual_55 => mul_27
# residual_57 => mul_28
# residual_7 => mul_3
# residual_9 => mul_4
# sum_1 => sum_3
# sum_10 => sum_12
# sum_11 => sum_13
# sum_12 => sum_14
# sum_13 => sum_15
# sum_14 => sum_16
# sum_15 => sum_17
# sum_16 => sum_18
# sum_17 => sum_19
# sum_18 => sum_20
# sum_19 => sum_21
# sum_2 => sum_4
# sum_20 => sum_22
# sum_21 => sum_23
# sum_22 => sum_24
# sum_23 => sum_25
# sum_24 => sum_26
# sum_25 => sum_27
# sum_26 => sum_28
# sum_27 => sum_29
# sum_28 => sum_30
# sum_29 => sum_31
# sum_3 => sum_5
# sum_4 => sum_6
# sum_5 => sum_7
# sum_6 => sum_8
# sum_7 => sum_9
# sum_8 => sum_10
# sum_9 => sum_11
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %unsqueeze_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %unsqueeze_5), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %unsqueeze_8), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %unsqueeze_11), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [-1]), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %unsqueeze_14), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [-1]), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %unsqueeze_17), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [-1]), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %unsqueeze_20), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [-1]), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %unsqueeze_23), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [-1]), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %unsqueeze_26), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_8, [-1]), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %unsqueeze_29), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_9, [-1]), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %unsqueeze_32), kwargs = {})
# %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_10, [-1]), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %unsqueeze_35), kwargs = {})
# %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_11, [-1]), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %unsqueeze_38), kwargs = {})
# %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_12, [-1]), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %unsqueeze_41), kwargs = {})
# %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_13, [-1]), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, %unsqueeze_44), kwargs = {})
# %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_14, [-1]), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %unsqueeze_47), kwargs = {})
# %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_15, [-1]), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %unsqueeze_50), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_16, [-1]), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %unsqueeze_53), kwargs = {})
# %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_17, [-1]), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %unsqueeze_56), kwargs = {})
# %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_18, [-1]), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %unsqueeze_59), kwargs = {})
# %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_19, [-1]), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %unsqueeze_62), kwargs = {})
# %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_20, [-1]), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_22, %unsqueeze_65), kwargs = {})
# %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_21, [-1]), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %unsqueeze_68), kwargs = {})
# %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_22, [-1]), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %unsqueeze_71), kwargs = {})
# %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_23, [-1]), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %unsqueeze_74), kwargs = {})
# %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_24, [-1]), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_26, %unsqueeze_77), kwargs = {})
# %sum_28 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_25, [-1]), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %unsqueeze_80), kwargs = {})
# %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_26, [-1]), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_28, %unsqueeze_83), kwargs = {})
# %sum_30 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_27, [-1]), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_29, %unsqueeze_86), kwargs = {})
# %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_28, [-1]), kwargs = {})
triton_red_fused_mul_sub_sum_3 = async_compile.triton('triton_red_fused_mul_sub_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[512, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: 'i32', 63: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 61, 'num_reduction': 29, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 128
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
x1 = (xindex // 128)
_tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp3 = tl.load(in_ptr2 + (r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr4 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp14 = tl.load(in_ptr2 + (4096 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp23 = tl.load(in_ptr2 + (8192 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp31 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp32 = tl.load(in_ptr2 + (12288 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp41 = tl.load(in_ptr2 + (16384 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp49 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp50 = tl.load(in_ptr2 + (20480 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr10 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp59 = tl.load(in_ptr2 + (24576 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp67 = tl.load(in_ptr11 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp68 = tl.load(in_ptr2 + (28672 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp76 = tl.load(in_ptr12 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp77 = tl.load(in_ptr2 + (32768 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp85 = tl.load(in_ptr13 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp86 = tl.load(in_ptr2 + (36864 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp94 = tl.load(in_ptr14 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp95 = tl.load(in_ptr2 + (40960 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp103 = tl.load(in_ptr15 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp104 = tl.load(in_ptr2 + (45056 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp112 = tl.load(in_ptr16 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp113 = tl.load(in_ptr2 + (49152 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp121 = tl.load(in_ptr17 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp122 = tl.load(in_ptr2 + (53248 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp130 = tl.load(in_ptr18 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp131 = tl.load(in_ptr2 + (57344 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp139 = tl.load(in_ptr19 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp140 = tl.load(in_ptr2 + (61440 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp148 = tl.load(in_ptr20 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp149 = tl.load(in_ptr2 + (65536 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp157 = tl.load(in_ptr21 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp158 = tl.load(in_ptr2 + (69632 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp166 = tl.load(in_ptr22 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp167 = tl.load(in_ptr2 + (73728 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp175 = tl.load(in_ptr23 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp176 = tl.load(in_ptr2 + (77824 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp184 = tl.load(in_ptr24 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp185 = tl.load(in_ptr2 + (81920 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp193 = tl.load(in_ptr25 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp194 = tl.load(in_ptr2 + (86016 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp202 = tl.load(in_ptr26 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp203 = tl.load(in_ptr2 + (90112 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp211 = tl.load(in_ptr27 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp212 = tl.load(in_ptr2 + (94208 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp220 = tl.load(in_ptr28 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp221 = tl.load(in_ptr2 + (98304 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp229 = tl.load(in_ptr29 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp230 = tl.load(in_ptr2 + (102400 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp238 = tl.load(in_ptr30 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp239 = tl.load(in_ptr2 + (106496 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp247 = tl.load(in_ptr31 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp248 = tl.load(in_ptr2 + (110592 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp256 = tl.load(in_ptr32 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp257 = tl.load(in_ptr2 + (114688 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp2 = tmp0 - tmp1  # residual: normalized descriptor minus centroid channel
        tmp5 = tmp3 - tmp4  # logit minus row max (stable softmax)
        tmp6 = tl_math.exp(tmp5)
        tmp8 = tmp6 / tmp7  # soft-assignment probability for this cluster
        tmp9 = tmp2 * tmp8  # weight the residual by the assignment
        tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
        tmp12 = _tmp11 + tmp10
        _tmp11 = tl.where(rmask & xmask, tmp12, _tmp11)  # running sum over positions
tmp15 = tmp14 - tmp4
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 / tmp7
tmp18 = tmp13 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = _tmp20 + tmp19
_tmp20 = tl.where(rmask & xmask, tmp21, _tmp20)
tmp24 = tmp23 - tmp4
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp25 / tmp7
tmp27 = tmp22 * tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = _tmp29 + tmp28
_tmp29 = tl.where(rmask & xmask, tmp30, _tmp29)
tmp33 = tmp32 - tmp4
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp34 / tmp7
tmp36 = tmp31 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = _tmp38 + tmp37
_tmp38 = tl.where(rmask & xmask, tmp39, _tmp38)
tmp42 = tmp41 - tmp4
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp43 / tmp7
tmp45 = tmp40 * tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp48 = _tmp47 + tmp46
_tmp47 = tl.where(rmask & xmask, tmp48, _tmp47)
tmp51 = tmp50 - tmp4
tmp52 = tl_math.exp(tmp51)
tmp53 = tmp52 / tmp7
tmp54 = tmp49 * tmp53
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp57 = _tmp56 + tmp55
_tmp56 = tl.where(rmask & xmask, tmp57, _tmp56)
tmp60 = tmp59 - tmp4
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp61 / tmp7
tmp63 = tmp58 * tmp62
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp66 = _tmp65 + tmp64
_tmp65 = tl.where(rmask & xmask, tmp66, _tmp65)
tmp69 = tmp68 - tmp4
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp70 / tmp7
tmp72 = tmp67 * tmp71
tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK])
tmp75 = _tmp74 + tmp73
_tmp74 = tl.where(rmask & xmask, tmp75, _tmp74)
tmp78 = tmp77 - tmp4
tmp79 = tl_math.exp(tmp78)
tmp80 = tmp79 / tmp7
tmp81 = tmp76 * tmp80
tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK])
tmp84 = _tmp83 + tmp82
_tmp83 = tl.where(rmask & xmask, tmp84, _tmp83)
tmp87 = tmp86 - tmp4
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp88 / tmp7
tmp90 = tmp85 * tmp89
tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK])
tmp93 = _tmp92 + tmp91
_tmp92 = tl.where(rmask & xmask, tmp93, _tmp92)
tmp96 = tmp95 - tmp4
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp97 / tmp7
tmp99 = tmp94 * tmp98
tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK])
tmp102 = _tmp101 + tmp100
_tmp101 = tl.where(rmask & xmask, tmp102, _tmp101)
tmp105 = tmp104 - tmp4
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp106 / tmp7
tmp108 = tmp103 * tmp107
tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK])
tmp111 = _tmp110 + tmp109
_tmp110 = tl.where(rmask & xmask, tmp111, _tmp110)
tmp114 = tmp113 - tmp4
tmp115 = tl_math.exp(tmp114)
tmp116 = tmp115 / tmp7
tmp117 = tmp112 * tmp116
tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK])
tmp120 = _tmp119 + tmp118
_tmp119 = tl.where(rmask & xmask, tmp120, _tmp119)
tmp123 = tmp122 - tmp4
tmp124 = tl_math.exp(tmp123)
tmp125 = tmp124 / tmp7
tmp126 = tmp121 * tmp125
tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK])
tmp129 = _tmp128 + tmp127
_tmp128 = tl.where(rmask & xmask, tmp129, _tmp128)
tmp132 = tmp131 - tmp4
tmp133 = tl_math.exp(tmp132)
tmp134 = tmp133 / tmp7
tmp135 = tmp130 * tmp134
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp138 = _tmp137 + tmp136
_tmp137 = tl.where(rmask & xmask, tmp138, _tmp137)
tmp141 = tmp140 - tmp4
tmp142 = tl_math.exp(tmp141)
tmp143 = tmp142 / tmp7
tmp144 = tmp139 * tmp143
tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK])
tmp147 = _tmp146 + tmp145
_tmp146 = tl.where(rmask & xmask, tmp147, _tmp146)
tmp150 = tmp149 - tmp4
tmp151 = tl_math.exp(tmp150)
tmp152 = tmp151 / tmp7
tmp153 = tmp148 * tmp152
tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK])
tmp156 = _tmp155 + tmp154
_tmp155 = tl.where(rmask & xmask, tmp156, _tmp155)
tmp159 = tmp158 - tmp4
tmp160 = tl_math.exp(tmp159)
tmp161 = tmp160 / tmp7
tmp162 = tmp157 * tmp161
tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK])
tmp165 = _tmp164 + tmp163
_tmp164 = tl.where(rmask & xmask, tmp165, _tmp164)
tmp168 = tmp167 - tmp4
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp169 / tmp7
tmp171 = tmp166 * tmp170
tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK])
tmp174 = _tmp173 + tmp172
_tmp173 = tl.where(rmask & xmask, tmp174, _tmp173)
tmp177 = tmp176 - tmp4
tmp178 = tl_math.exp(tmp177)
tmp179 = tmp178 / tmp7
tmp180 = tmp175 * tmp179
tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK])
tmp183 = _tmp182 + tmp181
_tmp182 = tl.where(rmask & xmask, tmp183, _tmp182)
tmp186 = tmp185 - tmp4
tmp187 = tl_math.exp(tmp186)
tmp188 = tmp187 / tmp7
tmp189 = tmp184 * tmp188
tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK])
tmp192 = _tmp191 + tmp190
_tmp191 = tl.where(rmask & xmask, tmp192, _tmp191)
tmp195 = tmp194 - tmp4
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp196 / tmp7
tmp198 = tmp193 * tmp197
tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK])
tmp201 = _tmp200 + tmp199
_tmp200 = tl.where(rmask & xmask, tmp201, _tmp200)
tmp204 = tmp203 - tmp4
tmp205 = tl_math.exp(tmp204)
tmp206 = tmp205 / tmp7
tmp207 = tmp202 * tmp206
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp210 = _tmp209 + tmp208
_tmp209 = tl.where(rmask & xmask, tmp210, _tmp209)
tmp213 = tmp212 - tmp4
tmp214 = tl_math.exp(tmp213)
tmp215 = tmp214 / tmp7
tmp216 = tmp211 * tmp215
tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK])
tmp219 = _tmp218 + tmp217
_tmp218 = tl.where(rmask & xmask, tmp219, _tmp218)
tmp222 = tmp221 - tmp4
tmp223 = tl_math.exp(tmp222)
tmp224 = tmp223 / tmp7
tmp225 = tmp220 * tmp224
tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK])
tmp228 = _tmp227 + tmp226
_tmp227 = tl.where(rmask & xmask, tmp228, _tmp227)
tmp231 = tmp230 - tmp4
tmp232 = tl_math.exp(tmp231)
tmp233 = tmp232 / tmp7
tmp234 = tmp229 * tmp233
tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK])
tmp237 = _tmp236 + tmp235
_tmp236 = tl.where(rmask & xmask, tmp237, _tmp236)
tmp240 = tmp239 - tmp4
tmp241 = tl_math.exp(tmp240)
tmp242 = tmp241 / tmp7
tmp243 = tmp238 * tmp242
tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK])
tmp246 = _tmp245 + tmp244
_tmp245 = tl.where(rmask & xmask, tmp246, _tmp245)
tmp249 = tmp248 - tmp4
tmp250 = tl_math.exp(tmp249)
tmp251 = tmp250 / tmp7
tmp252 = tmp247 * tmp251
tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK])
tmp255 = _tmp254 + tmp253
_tmp254 = tl.where(rmask & xmask, tmp255, _tmp254)
tmp258 = tmp257 - tmp4
tmp259 = tl_math.exp(tmp258)
tmp260 = tmp259 / tmp7
tmp261 = tmp256 * tmp260
tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK])
tmp264 = _tmp263 + tmp262
_tmp263 = tl.where(rmask & xmask, tmp264, _tmp263)
tmp11 = tl.sum(_tmp11, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp11, xmask)
tmp20 = tl.sum(_tmp20, 1)[:, None]
tl.store(out_ptr1 + (x3), tmp20, xmask)
tmp29 = tl.sum(_tmp29, 1)[:, None]
tl.store(out_ptr2 + (x3), tmp29, xmask)
tmp38 = tl.sum(_tmp38, 1)[:, None]
tl.store(out_ptr3 + (x3), tmp38, xmask)
tmp47 = tl.sum(_tmp47, 1)[:, None]
tl.store(out_ptr4 + (x3), tmp47, xmask)
tmp56 = tl.sum(_tmp56, 1)[:, None]
tl.store(out_ptr5 + (x3), tmp56, xmask)
tmp65 = tl.sum(_tmp65, 1)[:, None]
tl.store(out_ptr6 + (x3), tmp65, xmask)
tmp74 = tl.sum(_tmp74, 1)[:, None]
tl.store(out_ptr7 + (x3), tmp74, xmask)
tmp83 = tl.sum(_tmp83, 1)[:, None]
tl.store(out_ptr8 + (x3), tmp83, xmask)
tmp92 = tl.sum(_tmp92, 1)[:, None]
tl.store(out_ptr9 + (x3), tmp92, xmask)
tmp101 = tl.sum(_tmp101, 1)[:, None]
tl.store(out_ptr10 + (x3), tmp101, xmask)
tmp110 = tl.sum(_tmp110, 1)[:, None]
tl.store(out_ptr11 + (x3), tmp110, xmask)
tmp119 = tl.sum(_tmp119, 1)[:, None]
tl.store(out_ptr12 + (x3), tmp119, xmask)
tmp128 = tl.sum(_tmp128, 1)[:, None]
tl.store(out_ptr13 + (x3), tmp128, xmask)
tmp137 = tl.sum(_tmp137, 1)[:, None]
tl.store(out_ptr14 + (x3), tmp137, xmask)
tmp146 = tl.sum(_tmp146, 1)[:, None]
tl.store(out_ptr15 + (x3), tmp146, xmask)
tmp155 = tl.sum(_tmp155, 1)[:, None]
tl.store(out_ptr16 + (x3), tmp155, xmask)
tmp164 = tl.sum(_tmp164, 1)[:, None]
tl.store(out_ptr17 + (x3), tmp164, xmask)
tmp173 = tl.sum(_tmp173, 1)[:, None]
tl.store(out_ptr18 + (x3), tmp173, xmask)
tmp182 = tl.sum(_tmp182, 1)[:, None]
tl.store(out_ptr19 + (x3), tmp182, xmask)
tmp191 = tl.sum(_tmp191, 1)[:, None]
tl.store(out_ptr20 + (x3), tmp191, xmask)
tmp200 = tl.sum(_tmp200, 1)[:, None]
tl.store(out_ptr21 + (x3), tmp200, xmask)
tmp209 = tl.sum(_tmp209, 1)[:, None]
tl.store(out_ptr22 + (x3), tmp209, xmask)
tmp218 = tl.sum(_tmp218, 1)[:, None]
tl.store(out_ptr23 + (x3), tmp218, xmask)
tmp227 = tl.sum(_tmp227, 1)[:, None]
tl.store(out_ptr24 + (x3), tmp227, xmask)
tmp236 = tl.sum(_tmp236, 1)[:, None]
tl.store(out_ptr25 + (x3), tmp236, xmask)
tmp245 = tl.sum(_tmp245, 1)[:, None]
tl.store(out_ptr26 + (x3), tmp245, xmask)
tmp254 = tl.sum(_tmp254, 1)[:, None]
tl.store(out_ptr27 + (x3), tmp254, xmask)
tmp263 = tl.sum(_tmp263, 1)[:, None]
tl.store(out_ptr28 + (x3), tmp263, xmask)
''', device_str='cuda')
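
# For reference: an eager-mode sketch of one of the 29 fused reductions above.
# Each residual (from the normalization kernel) is weighted by its softmax
# probability, reconstructed from the max/denominator statistics stored by
# triton_per_fused__softmax_2, and summed over the 4096 positions. Names are
# illustrative; `k` indexes the cluster handled by a given in_ptr/out_ptr pair.
def _reference_weighted_residual_sum(residual_k, scores, amax, denom, k):
    # residual_k: (N, 128, 4096); scores: (N, 64, 4096); amax, denom: (N, 1, 4096)
    prob_k = (scores[:, k:k + 1] - amax).exp() / denom  # the tmp6 / tmp7 pattern
    return (residual_k * prob_k).sum(dim=-1)            # (N, 128) per cluster
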
# kernel path: runs/run_shard_2/inductor_cache/y2/cy2iowezi666txr2ot26oju5hlurnwvyizjcwcevdg7sddbv4msq.py
# Topologically Sorted Source Nodes: [residual_59, sum_30, residual_61, sum_31, residual_63, sum_32, residual_65, sum_33, residual_67, sum_34, residual_69, sum_35, residual_71, sum_36, residual_73, sum_37, residual_75, sum_38, residual_77, sum_39, residual_79, sum_40, residual_81, sum_41, residual_83, sum_42, residual_85, sum_43, residual_87, sum_44, residual_89, sum_45, residual_91, sum_46, residual_93, sum_47, residual_95, sum_48, residual_97, sum_49, residual_99, sum_50, residual_101, sum_51, residual_103, sum_52, residual_105, sum_53, residual_107, sum_54, residual_109, sum_55, residual_111, sum_56, residual_113, sum_57], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# residual_101 => mul_50
# residual_103 => mul_51
# residual_105 => mul_52
# residual_107 => mul_53
# residual_109 => mul_54
# residual_111 => mul_55
# residual_113 => mul_56
# residual_59 => mul_29
# residual_61 => mul_30
# residual_63 => mul_31
# residual_65 => mul_32
# residual_67 => mul_33
# residual_69 => mul_34
# residual_71 => mul_35
# residual_73 => mul_36
# residual_75 => mul_37
# residual_77 => mul_38
# residual_79 => mul_39
# residual_81 => mul_40
# residual_83 => mul_41
# residual_85 => mul_42
# residual_87 => mul_43
# residual_89 => mul_44
# residual_91 => mul_45
# residual_93 => mul_46
# residual_95 => mul_47
# residual_97 => mul_48
# residual_99 => mul_49
# sum_30 => sum_32
# sum_31 => sum_33
# sum_32 => sum_34
# sum_33 => sum_35
# sum_34 => sum_36
# sum_35 => sum_37
# sum_36 => sum_38
# sum_37 => sum_39
# sum_38 => sum_40
# sum_39 => sum_41
# sum_40 => sum_42
# sum_41 => sum_43
# sum_42 => sum_44
# sum_43 => sum_45
# sum_44 => sum_46
# sum_45 => sum_47
# sum_46 => sum_48
# sum_47 => sum_49
# sum_48 => sum_50
# sum_49 => sum_51
# sum_50 => sum_52
# sum_51 => sum_53
# sum_52 => sum_54
# sum_53 => sum_55
# sum_54 => sum_56
# sum_55 => sum_57
# sum_56 => sum_58
# sum_57 => sum_59
# Graph fragment:
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_30, %unsqueeze_89), kwargs = {})
# %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_29, [-1]), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %unsqueeze_92), kwargs = {})
# %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_30, [-1]), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %unsqueeze_95), kwargs = {})
# %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_31, [-1]), kwargs = {})
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_33, %unsqueeze_98), kwargs = {})
# %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_32, [-1]), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %unsqueeze_101), kwargs = {})
# %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_33, [-1]), kwargs = {})
# %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_35, %unsqueeze_104), kwargs = {})
# %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_34, [-1]), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_36, %unsqueeze_107), kwargs = {})
# %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_35, [-1]), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_37, %unsqueeze_110), kwargs = {})
# %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_36, [-1]), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_38, %unsqueeze_113), kwargs = {})
# %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_37, [-1]), kwargs = {})
# %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_39, %unsqueeze_116), kwargs = {})
# %sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_38, [-1]), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_40, %unsqueeze_119), kwargs = {})
# %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_39, [-1]), kwargs = {})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %unsqueeze_122), kwargs = {})
# %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_40, [-1]), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_42, %unsqueeze_125), kwargs = {})
# %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_41, [-1]), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_43, %unsqueeze_128), kwargs = {})
# %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_42, [-1]), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_44, %unsqueeze_131), kwargs = {})
# %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_43, [-1]), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_45, %unsqueeze_134), kwargs = {})
# %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_44, [-1]), kwargs = {})
# %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_46, %unsqueeze_137), kwargs = {})
# %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_45, [-1]), kwargs = {})
# %mul_46 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_47, %unsqueeze_140), kwargs = {})
# %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_46, [-1]), kwargs = {})
# %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_48, %unsqueeze_143), kwargs = {})
# %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_47, [-1]), kwargs = {})
# %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_49, %unsqueeze_146), kwargs = {})
# %sum_51 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_48, [-1]), kwargs = {})
# %mul_49 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_50, %unsqueeze_149), kwargs = {})
# %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_49, [-1]), kwargs = {})
# %mul_50 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_51, %unsqueeze_152), kwargs = {})
# %sum_53 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_50, [-1]), kwargs = {})
# %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_52, %unsqueeze_155), kwargs = {})
# %sum_54 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_51, [-1]), kwargs = {})
# %mul_52 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_53, %unsqueeze_158), kwargs = {})
# %sum_55 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_52, [-1]), kwargs = {})
# %mul_53 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_54, %unsqueeze_161), kwargs = {})
# %sum_56 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_53, [-1]), kwargs = {})
# %mul_54 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_55, %unsqueeze_164), kwargs = {})
# %sum_57 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_54, [-1]), kwargs = {})
# %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_56, %unsqueeze_167), kwargs = {})
# %sum_58 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_55, [-1]), kwargs = {})
# %mul_56 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_57, %unsqueeze_170), kwargs = {})
# %sum_59 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_56, [-1]), kwargs = {})
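# Note: this kernel is the second half of the weighted-residual reduction. It
# handles clusters 29..56 (the in_ptr1 offsets start at 118784 = 29 * 4096 and
# advance by 4096 per cluster) and is otherwise identical in structure to
# triton_red_fused_mul_sub_sum_3 and the reference sketch above.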
triton_red_fused_mul_sum_4 = async_compile.triton('triton_red_fused_mul_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[512, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: 'i32', 60: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 58, 'num_reduction': 28, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x1 = (xindex // 128)
_tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + (118784 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr4 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp12 = tl.load(in_ptr1 + (122880 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp21 = tl.load(in_ptr1 + (126976 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp30 = tl.load(in_ptr1 + (131072 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp39 = tl.load(in_ptr1 + (135168 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp48 = tl.load(in_ptr1 + (139264 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp57 = tl.load(in_ptr1 + (143360 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tl.load(in_ptr10 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp66 = tl.load(in_ptr1 + (147456 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tl.load(in_ptr11 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp75 = tl.load(in_ptr1 + (151552 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp83 = tl.load(in_ptr12 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp84 = tl.load(in_ptr1 + (155648 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp92 = tl.load(in_ptr13 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp93 = tl.load(in_ptr1 + (159744 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp101 = tl.load(in_ptr14 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp102 = tl.load(in_ptr1 + (163840 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp110 = tl.load(in_ptr15 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp111 = tl.load(in_ptr1 + (167936 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp119 = tl.load(in_ptr16 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp120 = tl.load(in_ptr1 + (172032 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp128 = tl.load(in_ptr17 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp129 = tl.load(in_ptr1 + (176128 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp137 = tl.load(in_ptr18 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp138 = tl.load(in_ptr1 + (180224 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp146 = tl.load(in_ptr19 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp147 = tl.load(in_ptr1 + (184320 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp155 = tl.load(in_ptr20 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp156 = tl.load(in_ptr1 + (188416 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp164 = tl.load(in_ptr21 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp165 = tl.load(in_ptr1 + (192512 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp173 = tl.load(in_ptr22 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp174 = tl.load(in_ptr1 + (196608 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp182 = tl.load(in_ptr23 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp183 = tl.load(in_ptr1 + (200704 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp191 = tl.load(in_ptr24 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp192 = tl.load(in_ptr1 + (204800 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp200 = tl.load(in_ptr25 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp201 = tl.load(in_ptr1 + (208896 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp209 = tl.load(in_ptr26 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp210 = tl.load(in_ptr1 + (212992 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp218 = tl.load(in_ptr27 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp219 = tl.load(in_ptr1 + (217088 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp227 = tl.load(in_ptr28 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp228 = tl.load(in_ptr1 + (221184 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp236 = tl.load(in_ptr29 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp237 = tl.load(in_ptr1 + (225280 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp245 = tl.load(in_ptr30 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp246 = tl.load(in_ptr1 + (229376 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
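        # Each seven-line group below handles one cluster: rebuild its softmax
        # weight as exp(score - row_max) / denom (row_max = in_ptr2, denom =
        # in_ptr3, evidently precomputed upstream), scale the residual by that
        # weight, and fold the product into the cluster's running accumulator.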
tmp3 = tmp1 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = _tmp9 + tmp8
_tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
tmp13 = tmp12 - tmp2
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tmp22 = tmp21 - tmp2
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp25 = tmp20 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = _tmp27 + tmp26
_tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
tmp31 = tmp30 - tmp2
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp34 = tmp29 * tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = _tmp36 + tmp35
_tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
tmp40 = tmp39 - tmp2
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp41 / tmp5
tmp43 = tmp38 * tmp42
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = _tmp45 + tmp44
_tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
tmp49 = tmp48 - tmp2
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp5
tmp52 = tmp47 * tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = _tmp54 + tmp53
_tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
tmp58 = tmp57 - tmp2
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp59 / tmp5
tmp61 = tmp56 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = _tmp63 + tmp62
_tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
tmp67 = tmp66 - tmp2
tmp68 = tl_math.exp(tmp67)
tmp69 = tmp68 / tmp5
tmp70 = tmp65 * tmp69
tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK])
tmp73 = _tmp72 + tmp71
_tmp72 = tl.where(rmask & xmask, tmp73, _tmp72)
tmp76 = tmp75 - tmp2
tmp77 = tl_math.exp(tmp76)
tmp78 = tmp77 / tmp5
tmp79 = tmp74 * tmp78
tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK])
tmp82 = _tmp81 + tmp80
_tmp81 = tl.where(rmask & xmask, tmp82, _tmp81)
tmp85 = tmp84 - tmp2
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp86 / tmp5
tmp88 = tmp83 * tmp87
tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK])
tmp91 = _tmp90 + tmp89
_tmp90 = tl.where(rmask & xmask, tmp91, _tmp90)
tmp94 = tmp93 - tmp2
tmp95 = tl_math.exp(tmp94)
tmp96 = tmp95 / tmp5
tmp97 = tmp92 * tmp96
tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK])
tmp100 = _tmp99 + tmp98
_tmp99 = tl.where(rmask & xmask, tmp100, _tmp99)
tmp103 = tmp102 - tmp2
tmp104 = tl_math.exp(tmp103)
tmp105 = tmp104 / tmp5
tmp106 = tmp101 * tmp105
tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK])
tmp109 = _tmp108 + tmp107
_tmp108 = tl.where(rmask & xmask, tmp109, _tmp108)
tmp112 = tmp111 - tmp2
tmp113 = tl_math.exp(tmp112)
tmp114 = tmp113 / tmp5
tmp115 = tmp110 * tmp114
tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK])
tmp118 = _tmp117 + tmp116
_tmp117 = tl.where(rmask & xmask, tmp118, _tmp117)
tmp121 = tmp120 - tmp2
tmp122 = tl_math.exp(tmp121)
tmp123 = tmp122 / tmp5
tmp124 = tmp119 * tmp123
tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK])
tmp127 = _tmp126 + tmp125
_tmp126 = tl.where(rmask & xmask, tmp127, _tmp126)
tmp130 = tmp129 - tmp2
tmp131 = tl_math.exp(tmp130)
tmp132 = tmp131 / tmp5
tmp133 = tmp128 * tmp132
tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
tmp136 = _tmp135 + tmp134
_tmp135 = tl.where(rmask & xmask, tmp136, _tmp135)
tmp139 = tmp138 - tmp2
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp140 / tmp5
tmp142 = tmp137 * tmp141
tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK])
tmp145 = _tmp144 + tmp143
_tmp144 = tl.where(rmask & xmask, tmp145, _tmp144)
tmp148 = tmp147 - tmp2
tmp149 = tl_math.exp(tmp148)
tmp150 = tmp149 / tmp5
tmp151 = tmp146 * tmp150
tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK])
tmp154 = _tmp153 + tmp152
_tmp153 = tl.where(rmask & xmask, tmp154, _tmp153)
tmp157 = tmp156 - tmp2
tmp158 = tl_math.exp(tmp157)
tmp159 = tmp158 / tmp5
tmp160 = tmp155 * tmp159
tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK])
tmp163 = _tmp162 + tmp161
_tmp162 = tl.where(rmask & xmask, tmp163, _tmp162)
tmp166 = tmp165 - tmp2
tmp167 = tl_math.exp(tmp166)
tmp168 = tmp167 / tmp5
tmp169 = tmp164 * tmp168
tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK])
tmp172 = _tmp171 + tmp170
_tmp171 = tl.where(rmask & xmask, tmp172, _tmp171)
tmp175 = tmp174 - tmp2
tmp176 = tl_math.exp(tmp175)
tmp177 = tmp176 / tmp5
tmp178 = tmp173 * tmp177
tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK])
tmp181 = _tmp180 + tmp179
_tmp180 = tl.where(rmask & xmask, tmp181, _tmp180)
tmp184 = tmp183 - tmp2
tmp185 = tl_math.exp(tmp184)
tmp186 = tmp185 / tmp5
tmp187 = tmp182 * tmp186
tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
tmp190 = _tmp189 + tmp188
_tmp189 = tl.where(rmask & xmask, tmp190, _tmp189)
tmp193 = tmp192 - tmp2
tmp194 = tl_math.exp(tmp193)
tmp195 = tmp194 / tmp5
tmp196 = tmp191 * tmp195
tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK])
tmp199 = _tmp198 + tmp197
_tmp198 = tl.where(rmask & xmask, tmp199, _tmp198)
tmp202 = tmp201 - tmp2
tmp203 = tl_math.exp(tmp202)
tmp204 = tmp203 / tmp5
tmp205 = tmp200 * tmp204
tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK])
tmp208 = _tmp207 + tmp206
_tmp207 = tl.where(rmask & xmask, tmp208, _tmp207)
tmp211 = tmp210 - tmp2
tmp212 = tl_math.exp(tmp211)
tmp213 = tmp212 / tmp5
tmp214 = tmp209 * tmp213
tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK])
tmp217 = _tmp216 + tmp215
_tmp216 = tl.where(rmask & xmask, tmp217, _tmp216)
tmp220 = tmp219 - tmp2
tmp221 = tl_math.exp(tmp220)
tmp222 = tmp221 / tmp5
tmp223 = tmp218 * tmp222
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp226 = _tmp225 + tmp224
_tmp225 = tl.where(rmask & xmask, tmp226, _tmp225)
tmp229 = tmp228 - tmp2
tmp230 = tl_math.exp(tmp229)
tmp231 = tmp230 / tmp5
tmp232 = tmp227 * tmp231
tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK])
tmp235 = _tmp234 + tmp233
_tmp234 = tl.where(rmask & xmask, tmp235, _tmp234)
tmp238 = tmp237 - tmp2
tmp239 = tl_math.exp(tmp238)
tmp240 = tmp239 / tmp5
tmp241 = tmp236 * tmp240
tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK])
tmp244 = _tmp243 + tmp242
_tmp243 = tl.where(rmask & xmask, tmp244, _tmp243)
tmp247 = tmp246 - tmp2
tmp248 = tl_math.exp(tmp247)
tmp249 = tmp248 / tmp5
tmp250 = tmp245 * tmp249
tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK])
tmp253 = _tmp252 + tmp251
_tmp252 = tl.where(rmask & xmask, tmp253, _tmp252)
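    # End of the spatial loop: collapse each accumulator over the RBLOCK axis
    # and store one scalar per (batch, descriptor-dim) element, one output
    # pointer per cluster (out_ptr0..out_ptr27 -> 28 clusters in this kernel).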
tmp9 = tl.sum(_tmp9, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp9, xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tl.store(out_ptr1 + (x3), tmp18, xmask)
tmp27 = tl.sum(_tmp27, 1)[:, None]
tl.store(out_ptr2 + (x3), tmp27, xmask)
tmp36 = tl.sum(_tmp36, 1)[:, None]
tl.store(out_ptr3 + (x3), tmp36, xmask)
tmp45 = tl.sum(_tmp45, 1)[:, None]
tl.store(out_ptr4 + (x3), tmp45, xmask)
tmp54 = tl.sum(_tmp54, 1)[:, None]
tl.store(out_ptr5 + (x3), tmp54, xmask)
tmp63 = tl.sum(_tmp63, 1)[:, None]
tl.store(out_ptr6 + (x3), tmp63, xmask)
tmp72 = tl.sum(_tmp72, 1)[:, None]
tl.store(out_ptr7 + (x3), tmp72, xmask)
tmp81 = tl.sum(_tmp81, 1)[:, None]
tl.store(out_ptr8 + (x3), tmp81, xmask)
tmp90 = tl.sum(_tmp90, 1)[:, None]
tl.store(out_ptr9 + (x3), tmp90, xmask)
tmp99 = tl.sum(_tmp99, 1)[:, None]
tl.store(out_ptr10 + (x3), tmp99, xmask)
tmp108 = tl.sum(_tmp108, 1)[:, None]
tl.store(out_ptr11 + (x3), tmp108, xmask)
tmp117 = tl.sum(_tmp117, 1)[:, None]
tl.store(out_ptr12 + (x3), tmp117, xmask)
tmp126 = tl.sum(_tmp126, 1)[:, None]
tl.store(out_ptr13 + (x3), tmp126, xmask)
tmp135 = tl.sum(_tmp135, 1)[:, None]
tl.store(out_ptr14 + (x3), tmp135, xmask)
tmp144 = tl.sum(_tmp144, 1)[:, None]
tl.store(out_ptr15 + (x3), tmp144, xmask)
tmp153 = tl.sum(_tmp153, 1)[:, None]
tl.store(out_ptr16 + (x3), tmp153, xmask)
tmp162 = tl.sum(_tmp162, 1)[:, None]
tl.store(out_ptr17 + (x3), tmp162, xmask)
tmp171 = tl.sum(_tmp171, 1)[:, None]
tl.store(out_ptr18 + (x3), tmp171, xmask)
tmp180 = tl.sum(_tmp180, 1)[:, None]
tl.store(out_ptr19 + (x3), tmp180, xmask)
tmp189 = tl.sum(_tmp189, 1)[:, None]
tl.store(out_ptr20 + (x3), tmp189, xmask)
tmp198 = tl.sum(_tmp198, 1)[:, None]
tl.store(out_ptr21 + (x3), tmp198, xmask)
tmp207 = tl.sum(_tmp207, 1)[:, None]
tl.store(out_ptr22 + (x3), tmp207, xmask)
tmp216 = tl.sum(_tmp216, 1)[:, None]
tl.store(out_ptr23 + (x3), tmp216, xmask)
tmp225 = tl.sum(_tmp225, 1)[:, None]
tl.store(out_ptr24 + (x3), tmp225, xmask)
tmp234 = tl.sum(_tmp234, 1)[:, None]
tl.store(out_ptr25 + (x3), tmp234, xmask)
tmp243 = tl.sum(_tmp243, 1)[:, None]
tl.store(out_ptr26 + (x3), tmp243, xmask)
tmp252 = tl.sum(_tmp252, 1)[:, None]
tl.store(out_ptr27 + (x3), tmp252, xmask)
''', device_str='cuda')
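
# Note: the reduction kernel above implements the aggregation step of a
# NetVLAD-style head. For each of its 28 clusters it rebuilds the softmax over
# the assignment scores from a precomputed row max (in_ptr2) and denominator
# (in_ptr3), weights the per-cluster residuals by it, and sums over the 4096
# spatial locations. A minimal eager-mode sketch of the same math; the tensor
# names are illustrative and do not appear in the generated code:

import torch

def _weighted_residual_sum(residual, assign_scores):
    """residual: (B, K, D, N) cluster residuals; assign_scores: (B, K, N) raw scores."""
    row_max = assign_scores.max(dim=1, keepdim=True).values   # role of in_ptr2
    exp_scores = torch.exp(assign_scores - row_max)
    denom = exp_scores.sum(dim=1, keepdim=True)               # role of in_ptr3
    soft_assign = exp_scores / denom                          # softmax over clusters
    # weight each residual and reduce over the N spatial locations -> (B, K, D)
    return (residual * soft_assign.unsqueeze(2)).sum(dim=-1)
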
# kernel path: runs/run_shard_2/inductor_cache/2y/c2ycvmz55d2cdgshqzt2ylgrhl3hwv23izoty2oj53exxztlcefi.py
# Topologically Sorted Source Nodes: [residual_115, sum_58, residual_117, sum_59, residual_119, sum_60, residual_121, sum_61, residual_123, sum_62, residual_125, sum_63, residual_127, sum_64], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# residual_115 => mul_57
# residual_117 => mul_58
# residual_119 => mul_59
# residual_121 => mul_60
# residual_123 => mul_61
# residual_125 => mul_62
# residual_127 => mul_63
# sum_58 => sum_60
# sum_59 => sum_61
# sum_60 => sum_62
# sum_61 => sum_63
# sum_62 => sum_64
# sum_63 => sum_65
# sum_64 => sum_66
# Graph fragment:
# %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_58, %unsqueeze_173), kwargs = {})
# %sum_60 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_57, [-1]), kwargs = {})
# %mul_58 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_59, %unsqueeze_176), kwargs = {})
# %sum_61 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_58, [-1]), kwargs = {})
# %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_60, %unsqueeze_179), kwargs = {})
# %sum_62 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_59, [-1]), kwargs = {})
# %mul_60 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_61, %unsqueeze_182), kwargs = {})
# %sum_63 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_60, [-1]), kwargs = {})
# %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_62, %unsqueeze_185), kwargs = {})
# %sum_64 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_61, [-1]), kwargs = {})
# %mul_62 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_63, %unsqueeze_188), kwargs = {})
# %sum_65 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_62, [-1]), kwargs = {})
# %mul_63 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_64, %unsqueeze_191), kwargs = {})
# %sum_66 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_63, [-1]), kwargs = {})
triton_red_fused_mul_sum_5 = async_compile.triton('triton_red_fused_mul_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[512, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32', 18: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 7, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x1 = (xindex // 128)
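    # xnumel = 512 = 4 batches * 128 descriptor dims: x3 walks the flattened
    # (batch, dim) outputs while x1 recovers the batch index used to step into
    # the shared score tensors (in_ptr1 / in_ptr2 / in_ptr3).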
_tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
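    # Seven float32 accumulators, one per remaining cluster (57..63).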
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + (233472 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr4 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp12 = tl.load(in_ptr1 + (237568 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp21 = tl.load(in_ptr1 + (241664 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp30 = tl.load(in_ptr1 + (245760 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp39 = tl.load(in_ptr1 + (249856 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp48 = tl.load(in_ptr1 + (253952 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp57 = tl.load(in_ptr1 + (258048 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = _tmp9 + tmp8
_tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
tmp13 = tmp12 - tmp2
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tmp22 = tmp21 - tmp2
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp25 = tmp20 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = _tmp27 + tmp26
_tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
tmp31 = tmp30 - tmp2
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp34 = tmp29 * tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = _tmp36 + tmp35
_tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
tmp40 = tmp39 - tmp2
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp41 / tmp5
tmp43 = tmp38 * tmp42
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = _tmp45 + tmp44
_tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
tmp49 = tmp48 - tmp2
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp5
tmp52 = tmp47 * tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = _tmp54 + tmp53
_tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
tmp58 = tmp57 - tmp2
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp59 / tmp5
tmp61 = tmp56 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = _tmp63 + tmp62
_tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
tmp9 = tl.sum(_tmp9, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp9, xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tl.store(out_ptr1 + (x3), tmp18, xmask)
tmp27 = tl.sum(_tmp27, 1)[:, None]
tl.store(out_ptr2 + (x3), tmp27, xmask)
tmp36 = tl.sum(_tmp36, 1)[:, None]
tl.store(out_ptr3 + (x3), tmp36, xmask)
tmp45 = tl.sum(_tmp45, 1)[:, None]
tl.store(out_ptr4 + (x3), tmp45, xmask)
tmp54 = tl.sum(_tmp54, 1)[:, None]
tl.store(out_ptr5 + (x3), tmp54, xmask)
tmp63 = tl.sum(_tmp63, 1)[:, None]
tl.store(out_ptr6 + (x3), tmp63, xmask)
''', device_str='cuda')
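
# Note: inductor has split the 64 clusters across several reduction kernels to
# keep per-launch argument counts and register pressure bounded; the kernel
# above covers only the last seven clusters (57..63). The magic load offsets
# follow from in_ptr1 holding the raw scores laid out as
# (batch, 64 clusters, 64*64 spatial) and flattened, so cluster k of batch b
# starts at b*262144 + k*4096. A quick sanity check of that arithmetic (the
# layout is inferred from the offsets, not stated anywhere in this file):

_SPATIAL = 64 * 64                 # 4096 locations per assignment map
_BATCH_STRIDE = 64 * _SPATIAL      # 262144 floats per batch element
assert _BATCH_STRIDE == 262144
assert [k * _SPATIAL for k in range(57, 64)] == [
    233472, 237568, 241664, 245760, 249856, 253952, 258048]
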
# kernel path: runs/run_shard_2/inductor_cache/vs/cvsgt2p572puh5ryhxl6bcviuizky5j4dgt53l7wfwlf2juihd4a.py
# Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63, vlad_1], Original ATen: [aten.zeros, aten.copy, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# setitem => copy
# setitem_1 => copy_1
# setitem_10 => copy_10
# setitem_11 => copy_11
# setitem_12 => copy_12
# setitem_13 => copy_13
# setitem_14 => copy_14
# setitem_15 => copy_15
# setitem_16 => copy_16
# setitem_17 => copy_17
# setitem_18 => copy_18
# setitem_19 => copy_19
# setitem_2 => copy_2
# setitem_20 => copy_20
# setitem_21 => copy_21
# setitem_22 => copy_22
# setitem_23 => copy_23
# setitem_24 => copy_24
# setitem_25 => copy_25
# setitem_26 => copy_26
# setitem_27 => copy_27
# setitem_28 => copy_28
# setitem_29 => copy_29
# setitem_3 => copy_3
# setitem_30 => copy_30
# setitem_31 => copy_31
# setitem_32 => copy_32
# setitem_33 => copy_33
# setitem_34 => copy_34
# setitem_35 => copy_35
# setitem_36 => copy_36
# setitem_37 => copy_37
# setitem_38 => copy_38
# setitem_39 => copy_39
# setitem_4 => copy_4
# setitem_40 => copy_40
# setitem_41 => copy_41
# setitem_42 => copy_42
# setitem_43 => copy_43
# setitem_44 => copy_44
# setitem_45 => copy_45
# setitem_46 => copy_46
# setitem_47 => copy_47
# setitem_48 => copy_48
# setitem_49 => copy_49
# setitem_5 => copy_5
# setitem_50 => copy_50
# setitem_51 => copy_51
# setitem_52 => copy_52
# setitem_53 => copy_53
# setitem_54 => copy_54
# setitem_55 => copy_55
# setitem_56 => copy_56
# setitem_57 => copy_57
# setitem_58 => copy_58
# setitem_59 => copy_59
# setitem_6 => copy_6
# setitem_60 => copy_60
# setitem_61 => copy_61
# setitem_62 => copy_62
# setitem_63 => copy_63
# setitem_7 => copy_7
# setitem_8 => copy_8
# setitem_9 => copy_9
# vlad => full
# vlad_1 => pow_3, pow_4, sum_67
# Graph fragment:
# %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 64, 128], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_7, %sum_3), kwargs = {})
# %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%full, %copy, 1, 0, 1), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_26, %sum_4), kwargs = {})
# %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %copy_1, 1, 1, 2), kwargs = {})
# %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_45, %sum_5), kwargs = {})
# %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %copy_2, 1, 2, 3), kwargs = {})
# %copy_3 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_64, %sum_6), kwargs = {})
# %slice_scatter_default_3 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %copy_3, 1, 3, 4), kwargs = {})
# %copy_4 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_83, %sum_7), kwargs = {})
# %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_3, %copy_4, 1, 4, 5), kwargs = {})
# %copy_5 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_102, %sum_8), kwargs = {})
# %slice_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %copy_5, 1, 5, 6), kwargs = {})
# %copy_6 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_121, %sum_9), kwargs = {})
# %slice_scatter_default_6 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_5, %copy_6, 1, 6, 7), kwargs = {})
# %copy_7 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_140, %sum_10), kwargs = {})
# %slice_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_6, %copy_7, 1, 7, 8), kwargs = {})
# %copy_8 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_159, %sum_11), kwargs = {})
# %slice_scatter_default_8 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %copy_8, 1, 8, 9), kwargs = {})
# %copy_9 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_178, %sum_12), kwargs = {})
# %slice_scatter_default_9 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_8, %copy_9, 1, 9, 10), kwargs = {})
# %copy_10 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_197, %sum_13), kwargs = {})
# %slice_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_9, %copy_10, 1, 10, 11), kwargs = {})
# %copy_11 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_216, %sum_14), kwargs = {})
# %slice_scatter_default_11 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_10, %copy_11, 1, 11, 12), kwargs = {})
# %copy_12 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_235, %sum_15), kwargs = {})
# %slice_scatter_default_12 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_11, %copy_12, 1, 12, 13), kwargs = {})
# %copy_13 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_254, %sum_16), kwargs = {})
# %slice_scatter_default_13 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_12, %copy_13, 1, 13, 14), kwargs = {})
# %copy_14 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_273, %sum_17), kwargs = {})
# %slice_scatter_default_14 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_13, %copy_14, 1, 14, 15), kwargs = {})
# %copy_15 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_292, %sum_18), kwargs = {})
# %slice_scatter_default_15 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_14, %copy_15, 1, 15, 16), kwargs = {})
# %copy_16 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_311, %sum_19), kwargs = {})
# %slice_scatter_default_16 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_15, %copy_16, 1, 16, 17), kwargs = {})
# %copy_17 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_330, %sum_20), kwargs = {})
# %slice_scatter_default_17 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_16, %copy_17, 1, 17, 18), kwargs = {})
# %copy_18 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_349, %sum_21), kwargs = {})
# %slice_scatter_default_18 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_17, %copy_18, 1, 18, 19), kwargs = {})
# %copy_19 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_368, %sum_22), kwargs = {})
# %slice_scatter_default_19 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_18, %copy_19, 1, 19, 20), kwargs = {})
# %copy_20 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_387, %sum_23), kwargs = {})
# %slice_scatter_default_20 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_19, %copy_20, 1, 20, 21), kwargs = {})
# %copy_21 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_406, %sum_24), kwargs = {})
# %slice_scatter_default_21 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_20, %copy_21, 1, 21, 22), kwargs = {})
# %copy_22 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_425, %sum_25), kwargs = {})
# %slice_scatter_default_22 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_21, %copy_22, 1, 22, 23), kwargs = {})
# %copy_23 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_444, %sum_26), kwargs = {})
# %slice_scatter_default_23 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_22, %copy_23, 1, 23, 24), kwargs = {})
# %copy_24 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_463, %sum_27), kwargs = {})
# %slice_scatter_default_24 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_23, %copy_24, 1, 24, 25), kwargs = {})
# %copy_25 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_482, %sum_28), kwargs = {})
# %slice_scatter_default_25 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_24, %copy_25, 1, 25, 26), kwargs = {})
# %copy_26 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_501, %sum_29), kwargs = {})
# %slice_scatter_default_26 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_25, %copy_26, 1, 26, 27), kwargs = {})
# %copy_27 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_520, %sum_30), kwargs = {})
# %slice_scatter_default_27 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_26, %copy_27, 1, 27, 28), kwargs = {})
# %copy_28 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_539, %sum_31), kwargs = {})
# %slice_scatter_default_28 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_27, %copy_28, 1, 28, 29), kwargs = {})
# %copy_29 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_558, %sum_32), kwargs = {})
# %slice_scatter_default_29 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_28, %copy_29, 1, 29, 30), kwargs = {})
# %copy_30 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_577, %sum_33), kwargs = {})
# %slice_scatter_default_30 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_29, %copy_30, 1, 30, 31), kwargs = {})
# %copy_31 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_596, %sum_34), kwargs = {})
# %slice_scatter_default_31 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_30, %copy_31, 1, 31, 32), kwargs = {})
# %copy_32 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_615, %sum_35), kwargs = {})
# %slice_scatter_default_32 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_31, %copy_32, 1, 32, 33), kwargs = {})
# %copy_33 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_634, %sum_36), kwargs = {})
# %slice_scatter_default_33 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_32, %copy_33, 1, 33, 34), kwargs = {})
# %copy_34 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_653, %sum_37), kwargs = {})
# %slice_scatter_default_34 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_33, %copy_34, 1, 34, 35), kwargs = {})
# %copy_35 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_672, %sum_38), kwargs = {})
# %slice_scatter_default_35 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_34, %copy_35, 1, 35, 36), kwargs = {})
# %copy_36 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_691, %sum_39), kwargs = {})
# %slice_scatter_default_36 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_35, %copy_36, 1, 36, 37), kwargs = {})
# %copy_37 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_710, %sum_40), kwargs = {})
# %slice_scatter_default_37 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_36, %copy_37, 1, 37, 38), kwargs = {})
# %copy_38 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_729, %sum_41), kwargs = {})
# %slice_scatter_default_38 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_37, %copy_38, 1, 38, 39), kwargs = {})
# %copy_39 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_748, %sum_42), kwargs = {})
# %slice_scatter_default_39 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_38, %copy_39, 1, 39, 40), kwargs = {})
# %copy_40 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_767, %sum_43), kwargs = {})
# %slice_scatter_default_40 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_39, %copy_40, 1, 40, 41), kwargs = {})
# %copy_41 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_786, %sum_44), kwargs = {})
# %slice_scatter_default_41 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_40, %copy_41, 1, 41, 42), kwargs = {})
# %copy_42 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_805, %sum_45), kwargs = {})
# %slice_scatter_default_42 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_41, %copy_42, 1, 42, 43), kwargs = {})
# %copy_43 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_824, %sum_46), kwargs = {})
# %slice_scatter_default_43 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_42, %copy_43, 1, 43, 44), kwargs = {})
# %copy_44 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_843, %sum_47), kwargs = {})
# %slice_scatter_default_44 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_43, %copy_44, 1, 44, 45), kwargs = {})
# %copy_45 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_862, %sum_48), kwargs = {})
# %slice_scatter_default_45 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_44, %copy_45, 1, 45, 46), kwargs = {})
# %copy_46 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_881, %sum_49), kwargs = {})
# %slice_scatter_default_46 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_45, %copy_46, 1, 46, 47), kwargs = {})
# %copy_47 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_900, %sum_50), kwargs = {})
# %slice_scatter_default_47 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_46, %copy_47, 1, 47, 48), kwargs = {})
# %copy_48 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_919, %sum_51), kwargs = {})
# %slice_scatter_default_48 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_47, %copy_48, 1, 48, 49), kwargs = {})
# %copy_49 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_938, %sum_52), kwargs = {})
# %slice_scatter_default_49 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_48, %copy_49, 1, 49, 50), kwargs = {})
# %copy_50 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_957, %sum_53), kwargs = {})
# %slice_scatter_default_50 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_49, %copy_50, 1, 50, 51), kwargs = {})
# %copy_51 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_976, %sum_54), kwargs = {})
# %slice_scatter_default_51 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_50, %copy_51, 1, 51, 52), kwargs = {})
# %copy_52 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_995, %sum_55), kwargs = {})
# %slice_scatter_default_52 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_51, %copy_52, 1, 52, 53), kwargs = {})
# %copy_53 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1014, %sum_56), kwargs = {})
# %slice_scatter_default_53 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_52, %copy_53, 1, 53, 54), kwargs = {})
# %copy_54 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1033, %sum_57), kwargs = {})
# %slice_scatter_default_54 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_53, %copy_54, 1, 54, 55), kwargs = {})
# %copy_55 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1052, %sum_58), kwargs = {})
# %slice_scatter_default_55 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_54, %copy_55, 1, 55, 56), kwargs = {})
# %copy_56 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1071, %sum_59), kwargs = {})
# %slice_scatter_default_56 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_55, %copy_56, 1, 56, 57), kwargs = {})
# %copy_57 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1090, %sum_60), kwargs = {})
# %slice_scatter_default_57 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_56, %copy_57, 1, 57, 58), kwargs = {})
# %copy_58 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1109, %sum_61), kwargs = {})
# %slice_scatter_default_58 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_57, %copy_58, 1, 58, 59), kwargs = {})
# %copy_59 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1128, %sum_62), kwargs = {})
# %slice_scatter_default_59 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_58, %copy_59, 1, 59, 60), kwargs = {})
# %copy_60 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1147, %sum_63), kwargs = {})
# %slice_scatter_default_60 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_59, %copy_60, 1, 60, 61), kwargs = {})
# %copy_61 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1166, %sum_64), kwargs = {})
# %slice_scatter_default_61 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_60, %copy_61, 1, 61, 62), kwargs = {})
# %copy_62 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1185, %sum_65), kwargs = {})
# %slice_scatter_default_62 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_61, %copy_62, 1, 62, 63), kwargs = {})
# %copy_63 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1204, %sum_66), kwargs = {})
# %slice_scatter_default_63 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_62, %copy_63, 1, 63, 64), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%slice_scatter_default_63, 2), kwargs = {})
# %sum_67 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {})
# %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_67, 0.5), kwargs = {})
triton_per_fused_copy_linalg_vector_norm_zeros_6 = async_compile.triton('triton_per_fused_copy_linalg_vector_norm_zeros_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: 'i32', 67: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_copy_linalg_vector_norm_zeros_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 64, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex % 64
r2 = rindex
x1 = (xindex // 64)
x3 = xindex
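    # xnumel = 256 = 4 batches * 64 clusters: x0 is the cluster index, x1 the
    # batch, and r2 the 128-dim descriptor axis handled in a single RBLOCK.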
tmp0 = x0
tmp1 = tl.full([1, 1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (r2 + (128*x1)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 3, tl.int64)
tmp8 = tmp0 >= tmp7
tmp9 = tmp0 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tl.load(in_ptr1 + (r2 + (128*x1)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.full([1, 1], 2, tl.int64)
tmp13 = tmp0 >= tmp12
tmp14 = tmp0 < tmp7
tmp15 = tmp13 & tmp14
tmp16 = tl.load(in_ptr2 + (r2 + (128*x1)), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.full([1, 1], 1, tl.int64)
tmp18 = tmp0 >= tmp17
tmp19 = tmp0 < tmp12
tmp20 = tmp18 & tmp19
tmp21 = tl.load(in_ptr3 + (r2 + (128*x1)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp0 < tmp17
tmp23 = tl.load(in_ptr4 + (r2 + (128*x1)), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = 0.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tl.where(tmp20, tmp21, tmp25)
tmp27 = tl.where(tmp15, tmp16, tmp26)
tmp28 = tl.where(tmp10, tmp11, tmp27)
tmp29 = tl.where(tmp5, tmp6, tmp28)
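    # Clusters 0..4 resolved: tmp29 holds the matching in_ptr's value wherever
    # x0 selects that cluster and 0.0 otherwise. The same cascade of range
    # tests plus tl.where merges now repeats for each further group of four
    # clusters (three in the last group), emulating the graph's 64
    # slice_scatter ops in a single pass.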
tmp30 = tl.full([1, 1], 8, tl.int64)
tmp31 = tmp0 >= tmp30
tmp32 = tl.full([1, 1], 9, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr5 + (r2 + (128*x1)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.full([1, 1], 7, tl.int64)
tmp37 = tmp0 >= tmp36
tmp38 = tmp0 < tmp30
tmp39 = tmp37 & tmp38
tmp40 = tl.load(in_ptr6 + (r2 + (128*x1)), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tl.full([1, 1], 6, tl.int64)
tmp42 = tmp0 >= tmp41
tmp43 = tmp0 < tmp36
tmp44 = tmp42 & tmp43
tmp45 = tl.load(in_ptr7 + (r2 + (128*x1)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp46 = tmp0 >= tmp3
tmp47 = tmp0 < tmp41
tmp48 = tmp46 & tmp47
tmp49 = tl.load(in_ptr8 + (r2 + (128*x1)), tmp48 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.where(tmp48, tmp49, tmp29)
tmp51 = tl.where(tmp44, tmp45, tmp50)
tmp52 = tl.where(tmp39, tmp40, tmp51)
tmp53 = tl.where(tmp34, tmp35, tmp52)
tmp54 = tl.full([1, 1], 12, tl.int64)
tmp55 = tmp0 >= tmp54
tmp56 = tl.full([1, 1], 13, tl.int64)
tmp57 = tmp0 < tmp56
tmp58 = tmp55 & tmp57
tmp59 = tl.load(in_ptr9 + (r2 + (128*x1)), tmp58 & xmask, eviction_policy='evict_last', other=0.0)
tmp60 = tl.full([1, 1], 11, tl.int64)
tmp61 = tmp0 >= tmp60
tmp62 = tmp0 < tmp54
tmp63 = tmp61 & tmp62
tmp64 = tl.load(in_ptr10 + (r2 + (128*x1)), tmp63 & xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tl.full([1, 1], 10, tl.int64)
tmp66 = tmp0 >= tmp65
tmp67 = tmp0 < tmp60
tmp68 = tmp66 & tmp67
tmp69 = tl.load(in_ptr11 + (r2 + (128*x1)), tmp68 & xmask, eviction_policy='evict_last', other=0.0)
tmp70 = tmp0 >= tmp32
tmp71 = tmp0 < tmp65
tmp72 = tmp70 & tmp71
tmp73 = tl.load(in_ptr12 + (r2 + (128*x1)), tmp72 & xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tl.where(tmp72, tmp73, tmp53)
tmp75 = tl.where(tmp68, tmp69, tmp74)
tmp76 = tl.where(tmp63, tmp64, tmp75)
tmp77 = tl.where(tmp58, tmp59, tmp76)
tmp78 = tl.full([1, 1], 16, tl.int64)
tmp79 = tmp0 >= tmp78
tmp80 = tl.full([1, 1], 17, tl.int64)
tmp81 = tmp0 < tmp80
tmp82 = tmp79 & tmp81
tmp83 = tl.load(in_ptr13 + (r2 + (128*x1)), tmp82 & xmask, eviction_policy='evict_last', other=0.0)
tmp84 = tl.full([1, 1], 15, tl.int64)
tmp85 = tmp0 >= tmp84
tmp86 = tmp0 < tmp78
tmp87 = tmp85 & tmp86
tmp88 = tl.load(in_ptr14 + (r2 + (128*x1)), tmp87 & xmask, eviction_policy='evict_last', other=0.0)
tmp89 = tl.full([1, 1], 14, tl.int64)
tmp90 = tmp0 >= tmp89
tmp91 = tmp0 < tmp84
tmp92 = tmp90 & tmp91
tmp93 = tl.load(in_ptr15 + (r2 + (128*x1)), tmp92 & xmask, eviction_policy='evict_last', other=0.0)
tmp94 = tmp0 >= tmp56
tmp95 = tmp0 < tmp89
tmp96 = tmp94 & tmp95
tmp97 = tl.load(in_ptr16 + (r2 + (128*x1)), tmp96 & xmask, eviction_policy='evict_last', other=0.0)
tmp98 = tl.where(tmp96, tmp97, tmp77)
tmp99 = tl.where(tmp92, tmp93, tmp98)
tmp100 = tl.where(tmp87, tmp88, tmp99)
tmp101 = tl.where(tmp82, tmp83, tmp100)
tmp102 = tl.full([1, 1], 20, tl.int64)
tmp103 = tmp0 >= tmp102
tmp104 = tl.full([1, 1], 21, tl.int64)
tmp105 = tmp0 < tmp104
tmp106 = tmp103 & tmp105
tmp107 = tl.load(in_ptr17 + (r2 + (128*x1)), tmp106 & xmask, eviction_policy='evict_last', other=0.0)
tmp108 = tl.full([1, 1], 19, tl.int64)
tmp109 = tmp0 >= tmp108
tmp110 = tmp0 < tmp102
tmp111 = tmp109 & tmp110
tmp112 = tl.load(in_ptr18 + (r2 + (128*x1)), tmp111 & xmask, eviction_policy='evict_last', other=0.0)
tmp113 = tl.full([1, 1], 18, tl.int64)
tmp114 = tmp0 >= tmp113
tmp115 = tmp0 < tmp108
tmp116 = tmp114 & tmp115
tmp117 = tl.load(in_ptr19 + (r2 + (128*x1)), tmp116 & xmask, eviction_policy='evict_last', other=0.0)
tmp118 = tmp0 >= tmp80
tmp119 = tmp0 < tmp113
tmp120 = tmp118 & tmp119
tmp121 = tl.load(in_ptr20 + (r2 + (128*x1)), tmp120 & xmask, eviction_policy='evict_last', other=0.0)
tmp122 = tl.where(tmp120, tmp121, tmp101)
tmp123 = tl.where(tmp116, tmp117, tmp122)
tmp124 = tl.where(tmp111, tmp112, tmp123)
tmp125 = tl.where(tmp106, tmp107, tmp124)
tmp126 = tl.full([1, 1], 24, tl.int64)
tmp127 = tmp0 >= tmp126
tmp128 = tl.full([1, 1], 25, tl.int64)
tmp129 = tmp0 < tmp128
tmp130 = tmp127 & tmp129
tmp131 = tl.load(in_ptr21 + (r2 + (128*x1)), tmp130 & xmask, eviction_policy='evict_last', other=0.0)
tmp132 = tl.full([1, 1], 23, tl.int64)
tmp133 = tmp0 >= tmp132
tmp134 = tmp0 < tmp126
tmp135 = tmp133 & tmp134
tmp136 = tl.load(in_ptr22 + (r2 + (128*x1)), tmp135 & xmask, eviction_policy='evict_last', other=0.0)
tmp137 = tl.full([1, 1], 22, tl.int64)
tmp138 = tmp0 >= tmp137
tmp139 = tmp0 < tmp132
tmp140 = tmp138 & tmp139
tmp141 = tl.load(in_ptr23 + (r2 + (128*x1)), tmp140 & xmask, eviction_policy='evict_last', other=0.0)
tmp142 = tmp0 >= tmp104
tmp143 = tmp0 < tmp137
tmp144 = tmp142 & tmp143
tmp145 = tl.load(in_ptr24 + (r2 + (128*x1)), tmp144 & xmask, eviction_policy='evict_last', other=0.0)
tmp146 = tl.where(tmp144, tmp145, tmp125)
tmp147 = tl.where(tmp140, tmp141, tmp146)
tmp148 = tl.where(tmp135, tmp136, tmp147)
tmp149 = tl.where(tmp130, tmp131, tmp148)
tmp150 = tl.full([1, 1], 28, tl.int64)
tmp151 = tmp0 >= tmp150
tmp152 = tl.full([1, 1], 29, tl.int64)
tmp153 = tmp0 < tmp152
tmp154 = tmp151 & tmp153
tmp155 = tl.load(in_ptr25 + (r2 + (128*x1)), tmp154 & xmask, eviction_policy='evict_last', other=0.0)
tmp156 = tl.full([1, 1], 27, tl.int64)
tmp157 = tmp0 >= tmp156
tmp158 = tmp0 < tmp150
tmp159 = tmp157 & tmp158
tmp160 = tl.load(in_ptr26 + (r2 + (128*x1)), tmp159 & xmask, eviction_policy='evict_last', other=0.0)
tmp161 = tl.full([1, 1], 26, tl.int64)
tmp162 = tmp0 >= tmp161
tmp163 = tmp0 < tmp156
tmp164 = tmp162 & tmp163
tmp165 = tl.load(in_ptr27 + (r2 + (128*x1)), tmp164 & xmask, eviction_policy='evict_last', other=0.0)
tmp166 = tmp0 >= tmp128
tmp167 = tmp0 < tmp161
tmp168 = tmp166 & tmp167
tmp169 = tl.load(in_ptr28 + (r2 + (128*x1)), tmp168 & xmask, eviction_policy='evict_last', other=0.0)
tmp170 = tl.where(tmp168, tmp169, tmp149)
tmp171 = tl.where(tmp164, tmp165, tmp170)
tmp172 = tl.where(tmp159, tmp160, tmp171)
tmp173 = tl.where(tmp154, tmp155, tmp172)
tmp174 = tl.full([1, 1], 32, tl.int64)
tmp175 = tmp0 >= tmp174
tmp176 = tl.full([1, 1], 33, tl.int64)
tmp177 = tmp0 < tmp176
tmp178 = tmp175 & tmp177
tmp179 = tl.load(in_ptr29 + (r2 + (128*x1)), tmp178 & xmask, eviction_policy='evict_last', other=0.0)
tmp180 = tl.full([1, 1], 31, tl.int64)
tmp181 = tmp0 >= tmp180
tmp182 = tmp0 < tmp174
tmp183 = tmp181 & tmp182
tmp184 = tl.load(in_ptr30 + (r2 + (128*x1)), tmp183 & xmask, eviction_policy='evict_last', other=0.0)
tmp185 = tl.full([1, 1], 30, tl.int64)
tmp186 = tmp0 >= tmp185
tmp187 = tmp0 < tmp180
tmp188 = tmp186 & tmp187
tmp189 = tl.load(in_ptr31 + (r2 + (128*x1)), tmp188 & xmask, eviction_policy='evict_last', other=0.0)
tmp190 = tmp0 >= tmp152
tmp191 = tmp0 < tmp185
tmp192 = tmp190 & tmp191
tmp193 = tl.load(in_ptr32 + (r2 + (128*x1)), tmp192 & xmask, eviction_policy='evict_last', other=0.0)
tmp194 = tl.where(tmp192, tmp193, tmp173)
tmp195 = tl.where(tmp188, tmp189, tmp194)
tmp196 = tl.where(tmp183, tmp184, tmp195)
tmp197 = tl.where(tmp178, tmp179, tmp196)
tmp198 = tl.full([1, 1], 36, tl.int64)
tmp199 = tmp0 >= tmp198
tmp200 = tl.full([1, 1], 37, tl.int64)
tmp201 = tmp0 < tmp200
tmp202 = tmp199 & tmp201
tmp203 = tl.load(in_ptr33 + (r2 + (128*x1)), tmp202 & xmask, eviction_policy='evict_last', other=0.0)
tmp204 = tl.full([1, 1], 35, tl.int64)
tmp205 = tmp0 >= tmp204
tmp206 = tmp0 < tmp198
tmp207 = tmp205 & tmp206
tmp208 = tl.load(in_ptr34 + (r2 + (128*x1)), tmp207 & xmask, eviction_policy='evict_last', other=0.0)
tmp209 = tl.full([1, 1], 34, tl.int64)
tmp210 = tmp0 >= tmp209
tmp211 = tmp0 < tmp204
tmp212 = tmp210 & tmp211
tmp213 = tl.load(in_ptr35 + (r2 + (128*x1)), tmp212 & xmask, eviction_policy='evict_last', other=0.0)
tmp214 = tmp0 >= tmp176
tmp215 = tmp0 < tmp209
tmp216 = tmp214 & tmp215
tmp217 = tl.load(in_ptr36 + (r2 + (128*x1)), tmp216 & xmask, eviction_policy='evict_last', other=0.0)
tmp218 = tl.where(tmp216, tmp217, tmp197)
tmp219 = tl.where(tmp212, tmp213, tmp218)
tmp220 = tl.where(tmp207, tmp208, tmp219)
tmp221 = tl.where(tmp202, tmp203, tmp220)
tmp222 = tl.full([1, 1], 40, tl.int64)
tmp223 = tmp0 >= tmp222
tmp224 = tl.full([1, 1], 41, tl.int64)
tmp225 = tmp0 < tmp224
tmp226 = tmp223 & tmp225
tmp227 = tl.load(in_ptr37 + (r2 + (128*x1)), tmp226 & xmask, eviction_policy='evict_last', other=0.0)
tmp228 = tl.full([1, 1], 39, tl.int64)
tmp229 = tmp0 >= tmp228
tmp230 = tmp0 < tmp222
tmp231 = tmp229 & tmp230
tmp232 = tl.load(in_ptr38 + (r2 + (128*x1)), tmp231 & xmask, eviction_policy='evict_last', other=0.0)
tmp233 = tl.full([1, 1], 38, tl.int64)
tmp234 = tmp0 >= tmp233
tmp235 = tmp0 < tmp228
tmp236 = tmp234 & tmp235
tmp237 = tl.load(in_ptr39 + (r2 + (128*x1)), tmp236 & xmask, eviction_policy='evict_last', other=0.0)
tmp238 = tmp0 >= tmp200
tmp239 = tmp0 < tmp233
tmp240 = tmp238 & tmp239
tmp241 = tl.load(in_ptr40 + (r2 + (128*x1)), tmp240 & xmask, eviction_policy='evict_last', other=0.0)
tmp242 = tl.where(tmp240, tmp241, tmp221)
tmp243 = tl.where(tmp236, tmp237, tmp242)
tmp244 = tl.where(tmp231, tmp232, tmp243)
tmp245 = tl.where(tmp226, tmp227, tmp244)
tmp246 = tl.full([1, 1], 44, tl.int64)
tmp247 = tmp0 >= tmp246
tmp248 = tl.full([1, 1], 45, tl.int64)
tmp249 = tmp0 < tmp248
tmp250 = tmp247 & tmp249
tmp251 = tl.load(in_ptr41 + (r2 + (128*x1)), tmp250 & xmask, eviction_policy='evict_last', other=0.0)
tmp252 = tl.full([1, 1], 43, tl.int64)
tmp253 = tmp0 >= tmp252
tmp254 = tmp0 < tmp246
tmp255 = tmp253 & tmp254
tmp256 = tl.load(in_ptr42 + (r2 + (128*x1)), tmp255 & xmask, eviction_policy='evict_last', other=0.0)
tmp257 = tl.full([1, 1], 42, tl.int64)
tmp258 = tmp0 >= tmp257
tmp259 = tmp0 < tmp252
tmp260 = tmp258 & tmp259
tmp261 = tl.load(in_ptr43 + (r2 + (128*x1)), tmp260 & xmask, eviction_policy='evict_last', other=0.0)
tmp262 = tmp0 >= tmp224
tmp263 = tmp0 < tmp257
tmp264 = tmp262 & tmp263
tmp265 = tl.load(in_ptr44 + (r2 + (128*x1)), tmp264 & xmask, eviction_policy='evict_last', other=0.0)
tmp266 = tl.where(tmp264, tmp265, tmp245)
tmp267 = tl.where(tmp260, tmp261, tmp266)
tmp268 = tl.where(tmp255, tmp256, tmp267)
tmp269 = tl.where(tmp250, tmp251, tmp268)
tmp270 = tl.full([1, 1], 48, tl.int64)
tmp271 = tmp0 >= tmp270
tmp272 = tl.full([1, 1], 49, tl.int64)
tmp273 = tmp0 < tmp272
tmp274 = tmp271 & tmp273
tmp275 = tl.load(in_ptr45 + (r2 + (128*x1)), tmp274 & xmask, eviction_policy='evict_last', other=0.0)
tmp276 = tl.full([1, 1], 47, tl.int64)
tmp277 = tmp0 >= tmp276
tmp278 = tmp0 < tmp270
tmp279 = tmp277 & tmp278
tmp280 = tl.load(in_ptr46 + (r2 + (128*x1)), tmp279 & xmask, eviction_policy='evict_last', other=0.0)
tmp281 = tl.full([1, 1], 46, tl.int64)
tmp282 = tmp0 >= tmp281
tmp283 = tmp0 < tmp276
tmp284 = tmp282 & tmp283
tmp285 = tl.load(in_ptr47 + (r2 + (128*x1)), tmp284 & xmask, eviction_policy='evict_last', other=0.0)
tmp286 = tmp0 >= tmp248
tmp287 = tmp0 < tmp281
tmp288 = tmp286 & tmp287
tmp289 = tl.load(in_ptr48 + (r2 + (128*x1)), tmp288 & xmask, eviction_policy='evict_last', other=0.0)
tmp290 = tl.where(tmp288, tmp289, tmp269)
tmp291 = tl.where(tmp284, tmp285, tmp290)
tmp292 = tl.where(tmp279, tmp280, tmp291)
tmp293 = tl.where(tmp274, tmp275, tmp292)
tmp294 = tl.full([1, 1], 52, tl.int64)
tmp295 = tmp0 >= tmp294
tmp296 = tl.full([1, 1], 53, tl.int64)
tmp297 = tmp0 < tmp296
tmp298 = tmp295 & tmp297
tmp299 = tl.load(in_ptr49 + (r2 + (128*x1)), tmp298 & xmask, eviction_policy='evict_last', other=0.0)
tmp300 = tl.full([1, 1], 51, tl.int64)
tmp301 = tmp0 >= tmp300
tmp302 = tmp0 < tmp294
tmp303 = tmp301 & tmp302
tmp304 = tl.load(in_ptr50 + (r2 + (128*x1)), tmp303 & xmask, eviction_policy='evict_last', other=0.0)
tmp305 = tl.full([1, 1], 50, tl.int64)
tmp306 = tmp0 >= tmp305
tmp307 = tmp0 < tmp300
tmp308 = tmp306 & tmp307
tmp309 = tl.load(in_ptr51 + (r2 + (128*x1)), tmp308 & xmask, eviction_policy='evict_last', other=0.0)
tmp310 = tmp0 >= tmp272
tmp311 = tmp0 < tmp305
tmp312 = tmp310 & tmp311
tmp313 = tl.load(in_ptr52 + (r2 + (128*x1)), tmp312 & xmask, eviction_policy='evict_last', other=0.0)
tmp314 = tl.where(tmp312, tmp313, tmp293)
tmp315 = tl.where(tmp308, tmp309, tmp314)
tmp316 = tl.where(tmp303, tmp304, tmp315)
tmp317 = tl.where(tmp298, tmp299, tmp316)
tmp318 = tl.full([1, 1], 56, tl.int64)
tmp319 = tmp0 >= tmp318
tmp320 = tl.full([1, 1], 57, tl.int64)
tmp321 = tmp0 < tmp320
tmp322 = tmp319 & tmp321
tmp323 = tl.load(in_ptr53 + (r2 + (128*x1)), tmp322 & xmask, eviction_policy='evict_last', other=0.0)
tmp324 = tl.full([1, 1], 55, tl.int64)
tmp325 = tmp0 >= tmp324
tmp326 = tmp0 < tmp318
tmp327 = tmp325 & tmp326
tmp328 = tl.load(in_ptr54 + (r2 + (128*x1)), tmp327 & xmask, eviction_policy='evict_last', other=0.0)
tmp329 = tl.full([1, 1], 54, tl.int64)
tmp330 = tmp0 >= tmp329
tmp331 = tmp0 < tmp324
tmp332 = tmp330 & tmp331
tmp333 = tl.load(in_ptr55 + (r2 + (128*x1)), tmp332 & xmask, eviction_policy='evict_last', other=0.0)
tmp334 = tmp0 >= tmp296
tmp335 = tmp0 < tmp329
tmp336 = tmp334 & tmp335
tmp337 = tl.load(in_ptr56 + (r2 + (128*x1)), tmp336 & xmask, eviction_policy='evict_last', other=0.0)
tmp338 = tl.where(tmp336, tmp337, tmp317)
tmp339 = tl.where(tmp332, tmp333, tmp338)
tmp340 = tl.where(tmp327, tmp328, tmp339)
tmp341 = tl.where(tmp322, tmp323, tmp340)
tmp342 = tl.full([1, 1], 60, tl.int64)
tmp343 = tmp0 >= tmp342
tmp344 = tl.full([1, 1], 61, tl.int64)
tmp345 = tmp0 < tmp344
tmp346 = tmp343 & tmp345
tmp347 = tl.load(in_ptr57 + (r2 + (128*x1)), tmp346 & xmask, eviction_policy='evict_last', other=0.0)
tmp348 = tl.full([1, 1], 59, tl.int64)
tmp349 = tmp0 >= tmp348
tmp350 = tmp0 < tmp342
tmp351 = tmp349 & tmp350
tmp352 = tl.load(in_ptr58 + (r2 + (128*x1)), tmp351 & xmask, eviction_policy='evict_last', other=0.0)
tmp353 = tl.full([1, 1], 58, tl.int64)
tmp354 = tmp0 >= tmp353
tmp355 = tmp0 < tmp348
tmp356 = tmp354 & tmp355
tmp357 = tl.load(in_ptr59 + (r2 + (128*x1)), tmp356 & xmask, eviction_policy='evict_last', other=0.0)
tmp358 = tmp0 >= tmp320
tmp359 = tmp0 < tmp353
tmp360 = tmp358 & tmp359
tmp361 = tl.load(in_ptr60 + (r2 + (128*x1)), tmp360 & xmask, eviction_policy='evict_last', other=0.0)
tmp362 = tl.where(tmp360, tmp361, tmp341)
tmp363 = tl.where(tmp356, tmp357, tmp362)
tmp364 = tl.where(tmp351, tmp352, tmp363)
tmp365 = tl.where(tmp346, tmp347, tmp364)
tmp366 = tl.full([1, 1], 63, tl.int64)
tmp367 = tmp0 >= tmp366
tmp368 = tl.load(in_ptr61 + (r2 + (128*x1)), tmp367 & xmask, eviction_policy='evict_last', other=0.0)
tmp369 = tl.full([1, 1], 62, tl.int64)
tmp370 = tmp0 >= tmp369
tmp371 = tmp0 < tmp366
tmp372 = tmp370 & tmp371
tmp373 = tl.load(in_ptr62 + (r2 + (128*x1)), tmp372 & xmask, eviction_policy='evict_last', other=0.0)
tmp374 = tmp0 >= tmp344
tmp375 = tmp0 < tmp369
tmp376 = tmp374 & tmp375
tmp377 = tl.load(in_ptr63 + (r2 + (128*x1)), tmp376 & xmask, eviction_policy='evict_last', other=0.0)
tmp378 = tl.where(tmp376, tmp377, tmp365)
tmp379 = tl.where(tmp372, tmp373, tmp378)
tmp380 = tl.where(tmp367, tmp368, tmp379)
tmp381 = tmp380 * tmp380
tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
tmp384 = tl.where(xmask, tmp382, 0)
tmp385 = tl.sum(tmp384, 1)[:, None]
tmp386 = libdevice.sqrt(tmp385)
tl.store(in_out_ptr0 + (r2 + (128*x3)), tmp380, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp386, xmask)
''', device_str='cuda')
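# The tail of the fused kernel above stacks the 64 per-cluster sums into the
# (N, 64, 128) VLAD matrix and emits each row's L2 norm for the following
# intra-normalization. A minimal eager-mode sketch of that tail (the helper
# name is illustrative, not part of the generated module):
def _eager_vlad_row_norms(vlad):
    # vlad: (N, K, C) -> (N, K) per-row L2 norms (sum of squares, then sqrt)
    return vlad.pow(2).sum(dim=-1).sqrt()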
# kernel path: runs/run_shard_2/inductor_cache/xt/cxtqnduhok2v4j5onaq63427jwrj6hwwv7ld23vqmaek6fsjbf2b.py
# Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# vlad_3 => div_3, pow_5, pow_6, sum_68
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {})
# %sum_68 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {})
# %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_68, 0.5), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %expand_66), kwargs = {})
triton_red_fused_div_linalg_vector_norm_7 = async_compile.triton('triton_red_fused_div_linalg_vector_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[4, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_div_linalg_vector_norm_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 4
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = 1e-12
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 / tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = _tmp7 + tmp6
_tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
tmp7 = tl.sum(_tmp7, 1)[:, None]
tmp9 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = 1e-12
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp10 / tmp13
tmp15 = triton_helpers.maximum(tmp9, tmp12)
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + (r1 + (8192*x0)), tmp16, rmask & xmask)
''', device_str='cuda')
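# The reduction kernel above fuses NetVLAD's two trailing normalizations:
# intra-normalize each cluster row by its clamped norm, flatten, then
# L2-normalize the whole descriptor. A hedged eager-mode sketch of the same
# math (the helper name is illustrative, not part of the generated module):
def _eager_final_normalize(vlad, row_norms):
    # vlad: (N, K, C); row_norms: (N, K) per-row L2 norms
    v = vlad / row_norms.clamp_min(1e-12).unsqueeze(-1)  # intra-normalization
    v = v.reshape(v.shape[0], -1)                        # flatten to (N, K*C)
    return v / v.norm(p=2, dim=1, keepdim=True).clamp_min(1e-12)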
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_3, (64, 128), (128, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_red_fused_linalg_vector_norm_0.run(primals_1, buf0, 16384, 128, grid=grid(16384), stream=stream0)
buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf8 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf10 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf12 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf17 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf19 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf21 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf24 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf28 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf30 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf33 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf35 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf37 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf39 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf42 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf44 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf46 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf48 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf51 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf53 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf55 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf57 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf60 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf62 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf64 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf66 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf69 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf71 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf73 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf75 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf78 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf80 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf82 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf84 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf87 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf89 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf91 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf93 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf96 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf98 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf100 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf102 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf105 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf107 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf109 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf111 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf114 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf116 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf118 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf120 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf123 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf125 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf127 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf129 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf132 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf134 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf136 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf138 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf141 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf143 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
buf145 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, residual_2, residual_4, residual_6, residual_8, residual_10, residual_12, residual_14, residual_16, residual_18, residual_20, residual_22, residual_24, residual_26, residual_28, residual_30, residual_32, residual_34, residual_36, residual_38, residual_40, residual_42, residual_44, residual_46, residual_48, residual_50, residual_52, residual_54, residual_56, residual_58, residual_60, residual_62, residual_64, residual_66, residual_68, residual_70, residual_72, residual_74, residual_76, residual_78, residual_80, residual_82, residual_84, residual_86, residual_88, residual_90, residual_92, residual_94, residual_96, residual_98, residual_100, residual_102, residual_104, residual_106, residual_108, residual_110, residual_112, residual_114, residual_116, residual_118, residual_120, residual_122, residual_124, residual_126], Original ATen: [aten.div, aten.sub]
triton_poi_fused_div_sub_1.run(primals_1, buf0, primals_3, buf1, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, 2097152, grid=grid(2097152), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [soft_assign_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_2.run(buf2, buf3, buf4, 16384, 64, grid=grid(16384), stream=stream0)
buf5 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf13 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf18 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf25 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf27 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf29 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf31 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf34 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf36 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf38 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf40 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf43 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf45 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf47 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf49 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf52 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf54 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf56 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf58 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf61 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf63 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf65 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf67 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
# Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_3, sum_2, residual_5, sum_3, residual_7, sum_4, residual_9, sum_5, residual_11, sum_6, residual_13, sum_7, residual_15, sum_8, residual_17, sum_9, residual_19, sum_10, residual_21, sum_11, residual_23, sum_12, residual_25, sum_13, residual_27, sum_14, residual_29, sum_15, residual_31, sum_16, residual_33, sum_17, residual_35, sum_18, residual_37, sum_19, residual_39, sum_20, residual_41, sum_21, residual_43, sum_22, residual_45, sum_23, residual_47, sum_24, residual_49, sum_25, residual_51, sum_26, residual_53, sum_27, residual_55, sum_28, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum]
triton_red_fused_mul_sub_sum_3.run(buf1, primals_3, buf2, buf3, buf4, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf5, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, 512, 4096, grid=grid(512), stream=stream0)
buf70 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf72 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf74 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf76 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf79 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf81 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf83 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf85 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf88 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf90 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf92 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf94 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf97 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf99 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf101 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf103 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf106 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf108 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf110 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf112 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf115 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf117 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf119 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf121 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf124 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf126 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf128 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf130 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
# Topologically Sorted Source Nodes: [residual_59, sum_30, residual_61, sum_31, residual_63, sum_32, residual_65, sum_33, residual_67, sum_34, residual_69, sum_35, residual_71, sum_36, residual_73, sum_37, residual_75, sum_38, residual_77, sum_39, residual_79, sum_40, residual_81, sum_41, residual_83, sum_42, residual_85, sum_43, residual_87, sum_44, residual_89, sum_45, residual_91, sum_46, residual_93, sum_47, residual_95, sum_48, residual_97, sum_49, residual_99, sum_50, residual_101, sum_51, residual_103, sum_52, residual_105, sum_53, residual_107, sum_54, residual_109, sum_55, residual_111, sum_56, residual_113, sum_57], Original ATen: [aten.mul, aten.sum]
triton_red_fused_mul_sum_4.run(buf69, buf2, buf3, buf4, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, 512, 4096, grid=grid(512), stream=stream0)
buf133 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf135 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf137 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf139 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf142 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf144 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf146 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
# Topologically Sorted Source Nodes: [residual_115, sum_58, residual_117, sum_59, residual_119, sum_60, residual_121, sum_61, residual_123, sum_62, residual_125, sum_63, residual_127, sum_64], Original ATen: [aten.mul, aten.sum]
triton_red_fused_mul_sum_5.run(buf132, buf2, buf3, buf4, buf134, buf136, buf138, buf141, buf143, buf145, buf133, buf135, buf137, buf139, buf142, buf144, buf146, 512, 4096, grid=grid(512), stream=stream0)
buf14 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32)
buf23 = buf14; del buf14 # reuse
buf32 = buf23; del buf23 # reuse
buf41 = buf32; del buf32 # reuse
buf50 = buf41; del buf41 # reuse
buf59 = buf50; del buf50 # reuse
buf68 = buf59; del buf59 # reuse
buf77 = buf68; del buf68 # reuse
buf86 = buf77; del buf77 # reuse
buf95 = buf86; del buf86 # reuse
buf104 = buf95; del buf95 # reuse
buf113 = buf104; del buf104 # reuse
buf122 = buf113; del buf113 # reuse
buf131 = buf122; del buf122 # reuse
buf140 = buf131; del buf131 # reuse
buf147 = buf140; del buf140 # reuse
buf148 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32)
buf149 = reinterpret_tensor(buf148, (4, 64, 1), (64, 1, 1), 0); del buf148 # reuse
# Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63, vlad_1], Original ATen: [aten.zeros, aten.copy, aten.linalg_vector_norm]
triton_per_fused_copy_linalg_vector_norm_zeros_6.run(buf147, buf149, buf13, buf11, buf9, buf7, buf5, buf22, buf20, buf18, buf16, buf31, buf29, buf27, buf25, buf40, buf38, buf36, buf34, buf49, buf47, buf45, buf43, buf58, buf56, buf54, buf52, buf67, buf65, buf63, buf61, buf76, buf74, buf72, buf70, buf85, buf83, buf81, buf79, buf94, buf92, buf90, buf88, buf103, buf101, buf99, buf97, buf112, buf110, buf108, buf106, buf121, buf119, buf117, buf115, buf130, buf128, buf126, buf124, buf139, buf137, buf135, buf133, buf146, buf144, buf142, 256, 128, grid=grid(256), stream=stream0)
del buf101
del buf103
del buf106
del buf108
del buf11
del buf110
del buf112
del buf115
del buf117
del buf119
del buf121
del buf124
del buf126
del buf128
del buf13
del buf130
del buf133
del buf135
del buf137
del buf139
del buf142
del buf144
del buf146
del buf16
del buf18
del buf20
del buf22
del buf25
del buf27
del buf29
del buf31
del buf34
del buf36
del buf38
del buf40
del buf43
del buf45
del buf47
del buf49
del buf5
del buf52
del buf54
del buf56
del buf58
del buf61
del buf63
del buf65
del buf67
del buf7
del buf70
del buf72
del buf74
del buf76
del buf79
del buf81
del buf83
del buf85
del buf88
del buf9
del buf90
del buf92
del buf94
del buf97
del buf99
buf150 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf151 = reinterpret_tensor(buf150, (4, 1), (1, 1), 0); del buf150 # reuse
buf152 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32)
# Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_red_fused_div_linalg_vector_norm_7.run(buf151, buf147, buf149, buf152, 4, 8192, grid=grid(4), stream=stream0)
return (buf152, primals_2, buf1, buf2, buf3, buf4, reinterpret_tensor(primals_3, (1, 128), (128, 1), 0), buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, buf147, buf149, buf151, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 128, 64, 64), (524288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from sklearn.neighbors import NearestNeighbors
class NetVLAD(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, num_clusters=64, dim=128, normalize_input=True,
vladv2=False):
"""
Args:
num_clusters : int
The number of clusters
dim : int
Dimension of descriptors
alpha : float
Parameter of initialization. Larger value is harder assignment.
normalize_input : bool
If true, descriptor-wise L2 normalization is applied to input.
vladv2 : bool
If true, use vladv2 otherwise use vladv1
"""
super(NetVLAD, self).__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = 0
self.vladv2 = vladv2
self.normalize_input = normalize_input
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1),
            bias=vladv2)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
def init_params(self, clsts, traindescs):
if self.vladv2 is False:
clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
dots = np.dot(clstsAssign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :]
            self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
            self.conv.weight = nn.Parameter(torch.from_numpy(
                self.alpha * clstsAssign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
else:
knn = NearestNeighbors(n_jobs=-1)
knn.fit(traindescs)
del traindescs
            # kneighbors returns (distances, indices); dsSq needs the squared
            # distances to the two nearest training descriptors, so index [0].
            dsSq = np.square(knn.kneighbors(clsts, 2)[0])
del knn
            self.alpha = (-np.log(0.01) / np.mean(dsSq[:, 1] - dsSq[:, 0])).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
del clsts, dsSq
            self.conv.weight = nn.Parameter(
                (2.0 * self.alpha * self.centroids).unsqueeze(-1).unsqueeze(-1))
            self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1))
def forward(self, x):
N, C = x.shape[:2]
if self.normalize_input:
x = F.normalize(x, p=2, dim=1)
soft_assign = self.conv(x).view(N, self.num_clusters, -1)
soft_assign = F.softmax(soft_assign, dim=1)
x_flatten = x.view(N, C, -1)
        vlad = torch.zeros([N, self.num_clusters, C], dtype=x.dtype,
            layout=x.layout, device=x.device)
        # The loop variable is renamed from C to k: the original shadowed the
        # channel count captured above, which is error-prone even though
        # x_flatten is already built. Each iteration accumulates the
        # soft-assignment-weighted residuals for one cluster.
        for k in range(self.num_clusters):
            residual = (x_flatten.unsqueeze(0).permute(1, 0, 2, 3)
                - self.centroids[k:k + 1, :].expand(x_flatten.size(-1), -1, -1)
                .permute(1, 2, 0).unsqueeze(0))
            residual *= soft_assign[:, k:k + 1, :].unsqueeze(2)
            vlad[:, k:k + 1, :] = residual.sum(dim=-1)
vlad = F.normalize(vlad, p=2, dim=2)
vlad = vlad.view(x.size(0), -1)
vlad = F.normalize(vlad, p=2, dim=1)
return vlad
def get_inputs():
return [torch.rand([4, 128, 64, 64])]
def get_init_inputs():
return [[], {}]
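# Usage sketch (illustrative, not part of the benchmark harness): the layer
# maps an (N, dim, H, W) feature map to an L2-normalized descriptor of shape
# (N, num_clusters * dim), e.g. (4, 128, 64, 64) -> (4, 8192).
def _usage_example():
    model = NetVLAD(num_clusters=64, dim=128)
    out = model(torch.rand(4, 128, 64, 64))
    assert out.shape == (4, 64 * 128)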
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch import nn
from sklearn.neighbors import NearestNeighbors
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x1 = xindex // 4096
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 524288 * x1), rmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, None)
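# The reduction kernel above accumulates sum(x ** 2) over the 128-channel
# dimension for every spatial location; the sqrt and the divide happen in the
# next fused kernel. A minimal eager-mode sketch of this reduction alone (the
# helper name is illustrative):
def _eager_channel_sqnorm(x):
    # x: (N, C, H, W) -> (N, 1, H, W), squared L2 norm over channels
    return (x * x).sum(dim=1, keepdim=True)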
@triton.jit
def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13,
out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19,
out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25,
out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31,
out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37,
out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43,
out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49,
out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55,
out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61,
out_ptr62, out_ptr63, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 524288
x1 = xindex // 4096 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (1792 + x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last')
tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last')
tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last')
tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last')
tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last')
tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last')
tmp84 = tl.load(in_ptr2 + (5120 + x1), None, eviction_policy='evict_last')
tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last')
tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last')
tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last')
tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last')
tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last')
tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last')
tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last')
tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last')
tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last')
tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last')
tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last')
tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last')
tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last')
tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last')
tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last')
tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last')
tmp126 = tl.load(in_ptr2 + (7808 + x1), None, eviction_policy='evict_last')
tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last')
tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last')
tmp2 = libdevice.sqrt(tmp1)
tmp3 = 1e-12
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 / tmp4
tmp7 = tmp5 - tmp6
tmp9 = tmp5 - tmp8
tmp11 = tmp5 - tmp10
tmp13 = tmp5 - tmp12
tmp15 = tmp5 - tmp14
tmp17 = tmp5 - tmp16
tmp19 = tmp5 - tmp18
tmp21 = tmp5 - tmp20
tmp23 = tmp5 - tmp22
tmp25 = tmp5 - tmp24
tmp27 = tmp5 - tmp26
tmp29 = tmp5 - tmp28
tmp31 = tmp5 - tmp30
tmp33 = tmp5 - tmp32
tmp35 = tmp5 - tmp34
tmp37 = tmp5 - tmp36
tmp39 = tmp5 - tmp38
tmp41 = tmp5 - tmp40
tmp43 = tmp5 - tmp42
tmp45 = tmp5 - tmp44
tmp47 = tmp5 - tmp46
tmp49 = tmp5 - tmp48
tmp51 = tmp5 - tmp50
tmp53 = tmp5 - tmp52
tmp55 = tmp5 - tmp54
tmp57 = tmp5 - tmp56
tmp59 = tmp5 - tmp58
tmp61 = tmp5 - tmp60
tmp63 = tmp5 - tmp62
tmp65 = tmp5 - tmp64
tmp67 = tmp5 - tmp66
tmp69 = tmp5 - tmp68
tmp71 = tmp5 - tmp70
tmp73 = tmp5 - tmp72
tmp75 = tmp5 - tmp74
tmp77 = tmp5 - tmp76
tmp79 = tmp5 - tmp78
tmp81 = tmp5 - tmp80
tmp83 = tmp5 - tmp82
tmp85 = tmp5 - tmp84
tmp87 = tmp5 - tmp86
tmp89 = tmp5 - tmp88
tmp91 = tmp5 - tmp90
tmp93 = tmp5 - tmp92
tmp95 = tmp5 - tmp94
tmp97 = tmp5 - tmp96
tmp99 = tmp5 - tmp98
tmp101 = tmp5 - tmp100
tmp103 = tmp5 - tmp102
tmp105 = tmp5 - tmp104
tmp107 = tmp5 - tmp106
tmp109 = tmp5 - tmp108
tmp111 = tmp5 - tmp110
tmp113 = tmp5 - tmp112
tmp115 = tmp5 - tmp114
tmp117 = tmp5 - tmp116
tmp119 = tmp5 - tmp118
tmp121 = tmp5 - tmp120
tmp123 = tmp5 - tmp122
tmp125 = tmp5 - tmp124
tmp127 = tmp5 - tmp126
tmp129 = tmp5 - tmp128
tmp131 = tmp5 - tmp130
tl.store(out_ptr0 + x3, tmp5, None)
tl.store(out_ptr1 + x3, tmp7, None)
tl.store(out_ptr2 + x3, tmp9, None)
tl.store(out_ptr3 + x3, tmp11, None)
tl.store(out_ptr4 + x3, tmp13, None)
tl.store(out_ptr5 + x3, tmp15, None)
tl.store(out_ptr6 + x3, tmp17, None)
tl.store(out_ptr7 + x3, tmp19, None)
tl.store(out_ptr8 + x3, tmp21, None)
tl.store(out_ptr9 + x3, tmp23, None)
tl.store(out_ptr10 + x3, tmp25, None)
tl.store(out_ptr11 + x3, tmp27, None)
tl.store(out_ptr12 + x3, tmp29, None)
tl.store(out_ptr13 + x3, tmp31, None)
tl.store(out_ptr14 + x3, tmp33, None)
tl.store(out_ptr15 + x3, tmp35, None)
tl.store(out_ptr16 + x3, tmp37, None)
tl.store(out_ptr17 + x3, tmp39, None)
tl.store(out_ptr18 + x3, tmp41, None)
tl.store(out_ptr19 + x3, tmp43, None)
tl.store(out_ptr20 + x3, tmp45, None)
tl.store(out_ptr21 + x3, tmp47, None)
tl.store(out_ptr22 + x3, tmp49, None)
tl.store(out_ptr23 + x3, tmp51, None)
tl.store(out_ptr24 + x3, tmp53, None)
tl.store(out_ptr25 + x3, tmp55, None)
tl.store(out_ptr26 + x3, tmp57, None)
tl.store(out_ptr27 + x3, tmp59, None)
tl.store(out_ptr28 + x3, tmp61, None)
tl.store(out_ptr29 + x3, tmp63, None)
tl.store(out_ptr30 + x3, tmp65, None)
tl.store(out_ptr31 + x3, tmp67, None)
tl.store(out_ptr32 + x3, tmp69, None)
tl.store(out_ptr33 + x3, tmp71, None)
tl.store(out_ptr34 + x3, tmp73, None)
tl.store(out_ptr35 + x3, tmp75, None)
tl.store(out_ptr36 + x3, tmp77, None)
tl.store(out_ptr37 + x3, tmp79, None)
tl.store(out_ptr38 + x3, tmp81, None)
tl.store(out_ptr39 + x3, tmp83, None)
tl.store(out_ptr40 + x3, tmp85, None)
tl.store(out_ptr41 + x3, tmp87, None)
tl.store(out_ptr42 + x3, tmp89, None)
tl.store(out_ptr43 + x3, tmp91, None)
tl.store(out_ptr44 + x3, tmp93, None)
tl.store(out_ptr45 + x3, tmp95, None)
tl.store(out_ptr46 + x3, tmp97, None)
tl.store(out_ptr47 + x3, tmp99, None)
tl.store(out_ptr48 + x3, tmp101, None)
tl.store(out_ptr49 + x3, tmp103, None)
tl.store(out_ptr50 + x3, tmp105, None)
tl.store(out_ptr51 + x3, tmp107, None)
tl.store(out_ptr52 + x3, tmp109, None)
tl.store(out_ptr53 + x3, tmp111, None)
tl.store(out_ptr54 + x3, tmp113, None)
tl.store(out_ptr55 + x3, tmp115, None)
tl.store(out_ptr56 + x3, tmp117, None)
tl.store(out_ptr57 + x3, tmp119, None)
tl.store(out_ptr58 + x3, tmp121, None)
tl.store(out_ptr59 + x3, tmp123, None)
tl.store(out_ptr60 + x3, tmp125, None)
tl.store(out_ptr61 + x3, tmp127, None)
tl.store(out_ptr62 + x3, tmp129, None)
tl.store(out_ptr63 + x3, tmp131, None)
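# The pointwise kernel above fuses the descriptor-wise L2 normalization
# (divide by the clamped sqrt of the squared channel sum) with the centroid
# subtraction for clusters 1..63; cluster 0's residual is formed inline in
# the reduction kernel below. A hedged eager-mode sketch (the helper name is
# illustrative):
def _eager_div_sub(x, sqnorm, centroids):
    # x: (N, C, H, W); sqnorm: (N, 1, H, W); centroids: (K, C)
    xn = x / sqnorm.sqrt().clamp_min(1e-12)  # == F.normalize(x, p=2, dim=1)
    residuals = [xn - c.view(1, -1, 1, 1) for c in centroids[1:]]
    return xn, residuals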
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4096
x1 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 262144 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, None)
tl.store(out_ptr1 + x3, tmp8, None)
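# This kernel materializes only the softmax statistics: the per-location max
# over the 64 cluster logits and the sum of exp(logit - max). The normalized
# probabilities are recomputed on the fly inside the downstream reduction
# kernels. A minimal eager-mode sketch of the two statistics (the helper name
# is illustrative):
def _eager_softmax_stats(logits):
    # logits: (N, K, HW) -> per-location max and exp-sum over the K clusters
    m = logits.max(dim=1, keepdim=True).values
    s = (logits - m).exp().sum(dim=1, keepdim=True)
    return m, s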
@triton.jit
def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17,
in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24,
in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31,
in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12,
out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18,
out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24,
out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 128
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
x1 = xindex // 128
_tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp3 = tl.load(in_ptr2 + (r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr4 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp14 = tl.load(in_ptr2 + (4096 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp23 = tl.load(in_ptr2 + (8192 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp32 = tl.load(in_ptr2 + (12288 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp40 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp41 = tl.load(in_ptr2 + (16384 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp49 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp50 = tl.load(in_ptr2 + (20480 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp59 = tl.load(in_ptr2 + (24576 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp67 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp68 = tl.load(in_ptr2 + (28672 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp76 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp77 = tl.load(in_ptr2 + (32768 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp85 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp86 = tl.load(in_ptr2 + (36864 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp94 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp95 = tl.load(in_ptr2 + (40960 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp103 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp104 = tl.load(in_ptr2 + (45056 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp112 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp113 = tl.load(in_ptr2 + (49152 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp121 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp122 = tl.load(in_ptr2 + (53248 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp130 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp131 = tl.load(in_ptr2 + (57344 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp139 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp140 = tl.load(in_ptr2 + (61440 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp148 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp149 = tl.load(in_ptr2 + (65536 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp157 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp158 = tl.load(in_ptr2 + (69632 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp166 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp167 = tl.load(in_ptr2 + (73728 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp175 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp176 = tl.load(in_ptr2 + (77824 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp184 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp185 = tl.load(in_ptr2 + (81920 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp193 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp194 = tl.load(in_ptr2 + (86016 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp202 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp203 = tl.load(in_ptr2 + (90112 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp211 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp212 = tl.load(in_ptr2 + (94208 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp220 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp221 = tl.load(in_ptr2 + (98304 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp229 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp230 = tl.load(in_ptr2 + (102400 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp238 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp239 = tl.load(in_ptr2 + (106496 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp247 = tl.load(in_ptr31 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp248 = tl.load(in_ptr2 + (110592 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp256 = tl.load(in_ptr32 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp257 = tl.load(in_ptr2 + (114688 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp2 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = _tmp11 + tmp10
_tmp11 = tl.where(rmask & xmask, tmp12, _tmp11)
tmp15 = tmp14 - tmp4
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 / tmp7
tmp18 = tmp13 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = _tmp20 + tmp19
_tmp20 = tl.where(rmask & xmask, tmp21, _tmp20)
tmp24 = tmp23 - tmp4
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp25 / tmp7
tmp27 = tmp22 * tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = _tmp29 + tmp28
_tmp29 = tl.where(rmask & xmask, tmp30, _tmp29)
tmp33 = tmp32 - tmp4
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp34 / tmp7
tmp36 = tmp31 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = _tmp38 + tmp37
_tmp38 = tl.where(rmask & xmask, tmp39, _tmp38)
tmp42 = tmp41 - tmp4
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp43 / tmp7
tmp45 = tmp40 * tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp48 = _tmp47 + tmp46
_tmp47 = tl.where(rmask & xmask, tmp48, _tmp47)
tmp51 = tmp50 - tmp4
tmp52 = tl_math.exp(tmp51)
tmp53 = tmp52 / tmp7
tmp54 = tmp49 * tmp53
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp57 = _tmp56 + tmp55
_tmp56 = tl.where(rmask & xmask, tmp57, _tmp56)
tmp60 = tmp59 - tmp4
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp61 / tmp7
tmp63 = tmp58 * tmp62
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp66 = _tmp65 + tmp64
_tmp65 = tl.where(rmask & xmask, tmp66, _tmp65)
tmp69 = tmp68 - tmp4
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp70 / tmp7
tmp72 = tmp67 * tmp71
tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK])
tmp75 = _tmp74 + tmp73
_tmp74 = tl.where(rmask & xmask, tmp75, _tmp74)
tmp78 = tmp77 - tmp4
tmp79 = tl_math.exp(tmp78)
tmp80 = tmp79 / tmp7
tmp81 = tmp76 * tmp80
tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK])
tmp84 = _tmp83 + tmp82
_tmp83 = tl.where(rmask & xmask, tmp84, _tmp83)
tmp87 = tmp86 - tmp4
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp88 / tmp7
tmp90 = tmp85 * tmp89
tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK])
tmp93 = _tmp92 + tmp91
_tmp92 = tl.where(rmask & xmask, tmp93, _tmp92)
tmp96 = tmp95 - tmp4
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp97 / tmp7
tmp99 = tmp94 * tmp98
tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK])
tmp102 = _tmp101 + tmp100
_tmp101 = tl.where(rmask & xmask, tmp102, _tmp101)
tmp105 = tmp104 - tmp4
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp106 / tmp7
tmp108 = tmp103 * tmp107
tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK])
tmp111 = _tmp110 + tmp109
_tmp110 = tl.where(rmask & xmask, tmp111, _tmp110)
tmp114 = tmp113 - tmp4
tmp115 = tl_math.exp(tmp114)
tmp116 = tmp115 / tmp7
tmp117 = tmp112 * tmp116
tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK])
tmp120 = _tmp119 + tmp118
_tmp119 = tl.where(rmask & xmask, tmp120, _tmp119)
tmp123 = tmp122 - tmp4
tmp124 = tl_math.exp(tmp123)
tmp125 = tmp124 / tmp7
tmp126 = tmp121 * tmp125
tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK])
tmp129 = _tmp128 + tmp127
_tmp128 = tl.where(rmask & xmask, tmp129, _tmp128)
tmp132 = tmp131 - tmp4
tmp133 = tl_math.exp(tmp132)
tmp134 = tmp133 / tmp7
tmp135 = tmp130 * tmp134
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp138 = _tmp137 + tmp136
_tmp137 = tl.where(rmask & xmask, tmp138, _tmp137)
tmp141 = tmp140 - tmp4
tmp142 = tl_math.exp(tmp141)
tmp143 = tmp142 / tmp7
tmp144 = tmp139 * tmp143
tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK])
tmp147 = _tmp146 + tmp145
_tmp146 = tl.where(rmask & xmask, tmp147, _tmp146)
tmp150 = tmp149 - tmp4
tmp151 = tl_math.exp(tmp150)
tmp152 = tmp151 / tmp7
tmp153 = tmp148 * tmp152
tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK])
tmp156 = _tmp155 + tmp154
_tmp155 = tl.where(rmask & xmask, tmp156, _tmp155)
tmp159 = tmp158 - tmp4
tmp160 = tl_math.exp(tmp159)
tmp161 = tmp160 / tmp7
tmp162 = tmp157 * tmp161
tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK])
tmp165 = _tmp164 + tmp163
_tmp164 = tl.where(rmask & xmask, tmp165, _tmp164)
tmp168 = tmp167 - tmp4
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp169 / tmp7
tmp171 = tmp166 * tmp170
tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK])
tmp174 = _tmp173 + tmp172
_tmp173 = tl.where(rmask & xmask, tmp174, _tmp173)
tmp177 = tmp176 - tmp4
tmp178 = tl_math.exp(tmp177)
tmp179 = tmp178 / tmp7
tmp180 = tmp175 * tmp179
tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK])
tmp183 = _tmp182 + tmp181
_tmp182 = tl.where(rmask & xmask, tmp183, _tmp182)
tmp186 = tmp185 - tmp4
tmp187 = tl_math.exp(tmp186)
tmp188 = tmp187 / tmp7
tmp189 = tmp184 * tmp188
tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK])
tmp192 = _tmp191 + tmp190
_tmp191 = tl.where(rmask & xmask, tmp192, _tmp191)
tmp195 = tmp194 - tmp4
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp196 / tmp7
tmp198 = tmp193 * tmp197
tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK])
tmp201 = _tmp200 + tmp199
_tmp200 = tl.where(rmask & xmask, tmp201, _tmp200)
tmp204 = tmp203 - tmp4
tmp205 = tl_math.exp(tmp204)
tmp206 = tmp205 / tmp7
tmp207 = tmp202 * tmp206
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp210 = _tmp209 + tmp208
_tmp209 = tl.where(rmask & xmask, tmp210, _tmp209)
tmp213 = tmp212 - tmp4
tmp214 = tl_math.exp(tmp213)
tmp215 = tmp214 / tmp7
tmp216 = tmp211 * tmp215
tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK])
tmp219 = _tmp218 + tmp217
_tmp218 = tl.where(rmask & xmask, tmp219, _tmp218)
tmp222 = tmp221 - tmp4
tmp223 = tl_math.exp(tmp222)
tmp224 = tmp223 / tmp7
tmp225 = tmp220 * tmp224
tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK])
tmp228 = _tmp227 + tmp226
_tmp227 = tl.where(rmask & xmask, tmp228, _tmp227)
tmp231 = tmp230 - tmp4
tmp232 = tl_math.exp(tmp231)
tmp233 = tmp232 / tmp7
tmp234 = tmp229 * tmp233
tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK])
tmp237 = _tmp236 + tmp235
_tmp236 = tl.where(rmask & xmask, tmp237, _tmp236)
tmp240 = tmp239 - tmp4
tmp241 = tl_math.exp(tmp240)
tmp242 = tmp241 / tmp7
tmp243 = tmp238 * tmp242
tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK])
tmp246 = _tmp245 + tmp244
_tmp245 = tl.where(rmask & xmask, tmp246, _tmp245)
tmp249 = tmp248 - tmp4
tmp250 = tl_math.exp(tmp249)
tmp251 = tmp250 / tmp7
tmp252 = tmp247 * tmp251
tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK])
tmp255 = _tmp254 + tmp253
_tmp254 = tl.where(rmask & xmask, tmp255, _tmp254)
tmp258 = tmp257 - tmp4
tmp259 = tl_math.exp(tmp258)
tmp260 = tmp259 / tmp7
tmp261 = tmp256 * tmp260
tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK])
tmp264 = _tmp263 + tmp262
_tmp263 = tl.where(rmask & xmask, tmp264, _tmp263)
tmp11 = tl.sum(_tmp11, 1)[:, None]
tl.store(out_ptr0 + x3, tmp11, xmask)
tmp20 = tl.sum(_tmp20, 1)[:, None]
tl.store(out_ptr1 + x3, tmp20, xmask)
tmp29 = tl.sum(_tmp29, 1)[:, None]
tl.store(out_ptr2 + x3, tmp29, xmask)
tmp38 = tl.sum(_tmp38, 1)[:, None]
tl.store(out_ptr3 + x3, tmp38, xmask)
tmp47 = tl.sum(_tmp47, 1)[:, None]
tl.store(out_ptr4 + x3, tmp47, xmask)
tmp56 = tl.sum(_tmp56, 1)[:, None]
tl.store(out_ptr5 + x3, tmp56, xmask)
tmp65 = tl.sum(_tmp65, 1)[:, None]
tl.store(out_ptr6 + x3, tmp65, xmask)
tmp74 = tl.sum(_tmp74, 1)[:, None]
tl.store(out_ptr7 + x3, tmp74, xmask)
tmp83 = tl.sum(_tmp83, 1)[:, None]
tl.store(out_ptr8 + x3, tmp83, xmask)
tmp92 = tl.sum(_tmp92, 1)[:, None]
tl.store(out_ptr9 + x3, tmp92, xmask)
tmp101 = tl.sum(_tmp101, 1)[:, None]
tl.store(out_ptr10 + x3, tmp101, xmask)
tmp110 = tl.sum(_tmp110, 1)[:, None]
tl.store(out_ptr11 + x3, tmp110, xmask)
tmp119 = tl.sum(_tmp119, 1)[:, None]
tl.store(out_ptr12 + x3, tmp119, xmask)
tmp128 = tl.sum(_tmp128, 1)[:, None]
tl.store(out_ptr13 + x3, tmp128, xmask)
tmp137 = tl.sum(_tmp137, 1)[:, None]
tl.store(out_ptr14 + x3, tmp137, xmask)
tmp146 = tl.sum(_tmp146, 1)[:, None]
tl.store(out_ptr15 + x3, tmp146, xmask)
tmp155 = tl.sum(_tmp155, 1)[:, None]
tl.store(out_ptr16 + x3, tmp155, xmask)
tmp164 = tl.sum(_tmp164, 1)[:, None]
tl.store(out_ptr17 + x3, tmp164, xmask)
tmp173 = tl.sum(_tmp173, 1)[:, None]
tl.store(out_ptr18 + x3, tmp173, xmask)
tmp182 = tl.sum(_tmp182, 1)[:, None]
tl.store(out_ptr19 + x3, tmp182, xmask)
tmp191 = tl.sum(_tmp191, 1)[:, None]
tl.store(out_ptr20 + x3, tmp191, xmask)
tmp200 = tl.sum(_tmp200, 1)[:, None]
tl.store(out_ptr21 + x3, tmp200, xmask)
tmp209 = tl.sum(_tmp209, 1)[:, None]
tl.store(out_ptr22 + x3, tmp209, xmask)
tmp218 = tl.sum(_tmp218, 1)[:, None]
tl.store(out_ptr23 + x3, tmp218, xmask)
tmp227 = tl.sum(_tmp227, 1)[:, None]
tl.store(out_ptr24 + x3, tmp227, xmask)
tmp236 = tl.sum(_tmp236, 1)[:, None]
tl.store(out_ptr25 + x3, tmp236, xmask)
tmp245 = tl.sum(_tmp245, 1)[:, None]
tl.store(out_ptr26 + x3, tmp245, xmask)
tmp254 = tl.sum(_tmp254, 1)[:, None]
tl.store(out_ptr27 + x3, tmp254, xmask)
tmp263 = tl.sum(_tmp263, 1)[:, None]
tl.store(out_ptr28 + x3, tmp263, xmask)
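# The reduction kernel below performs the NetVLAD aggregation
#   vlad[k] = sum_i softassign_k(x_i) * (x_i - c_k)
# for clusters 29-56: in_ptr0 and in_ptr4..in_ptr30 hold per-cluster residuals,
# in_ptr1 holds the raw assignment logits (offsets 118784..229376 stepping by
# 4096 = 64*64 spatial positions, i.e. clusters 29..56), and in_ptr2/in_ptr3
# carry the softmax max/sum statistics. Each out_ptr receives one 128-dim sum.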
@triton.jit
def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18,
in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25,
in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14,
out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20,
out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26,
out_ptr27, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x1 = xindex // 128
_tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + (118784 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp12 = tl.load(in_ptr1 + (122880 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp21 = tl.load(in_ptr1 + (126976 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp30 = tl.load(in_ptr1 + (131072 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp39 = tl.load(in_ptr1 + (135168 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp48 = tl.load(in_ptr1 + (139264 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp57 = tl.load(in_ptr1 + (143360 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp66 = tl.load(in_ptr1 + (147456 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp75 = tl.load(in_ptr1 + (151552 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp83 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp84 = tl.load(in_ptr1 + (155648 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp92 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp93 = tl.load(in_ptr1 + (159744 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp101 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp102 = tl.load(in_ptr1 + (163840 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp110 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp111 = tl.load(in_ptr1 + (167936 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp119 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp120 = tl.load(in_ptr1 + (172032 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp128 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp129 = tl.load(in_ptr1 + (176128 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp137 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp138 = tl.load(in_ptr1 + (180224 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp146 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp147 = tl.load(in_ptr1 + (184320 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp155 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp156 = tl.load(in_ptr1 + (188416 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp164 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp165 = tl.load(in_ptr1 + (192512 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp173 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp174 = tl.load(in_ptr1 + (196608 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp182 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp183 = tl.load(in_ptr1 + (200704 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp191 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp192 = tl.load(in_ptr1 + (204800 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp200 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp201 = tl.load(in_ptr1 + (208896 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp209 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp210 = tl.load(in_ptr1 + (212992 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp218 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp219 = tl.load(in_ptr1 + (217088 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp227 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp228 = tl.load(in_ptr1 + (221184 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp236 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp237 = tl.load(in_ptr1 + (225280 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp245 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp246 = tl.load(in_ptr1 + (229376 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = _tmp9 + tmp8
_tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
tmp13 = tmp12 - tmp2
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tmp22 = tmp21 - tmp2
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp25 = tmp20 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = _tmp27 + tmp26
_tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
tmp31 = tmp30 - tmp2
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp34 = tmp29 * tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = _tmp36 + tmp35
_tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
tmp40 = tmp39 - tmp2
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp41 / tmp5
tmp43 = tmp38 * tmp42
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = _tmp45 + tmp44
_tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
tmp49 = tmp48 - tmp2
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp5
tmp52 = tmp47 * tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = _tmp54 + tmp53
_tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
tmp58 = tmp57 - tmp2
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp59 / tmp5
tmp61 = tmp56 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = _tmp63 + tmp62
_tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
tmp67 = tmp66 - tmp2
tmp68 = tl_math.exp(tmp67)
tmp69 = tmp68 / tmp5
tmp70 = tmp65 * tmp69
tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK])
tmp73 = _tmp72 + tmp71
_tmp72 = tl.where(rmask & xmask, tmp73, _tmp72)
tmp76 = tmp75 - tmp2
tmp77 = tl_math.exp(tmp76)
tmp78 = tmp77 / tmp5
tmp79 = tmp74 * tmp78
tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK])
tmp82 = _tmp81 + tmp80
_tmp81 = tl.where(rmask & xmask, tmp82, _tmp81)
tmp85 = tmp84 - tmp2
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp86 / tmp5
tmp88 = tmp83 * tmp87
tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK])
tmp91 = _tmp90 + tmp89
_tmp90 = tl.where(rmask & xmask, tmp91, _tmp90)
tmp94 = tmp93 - tmp2
tmp95 = tl_math.exp(tmp94)
tmp96 = tmp95 / tmp5
tmp97 = tmp92 * tmp96
tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK])
tmp100 = _tmp99 + tmp98
_tmp99 = tl.where(rmask & xmask, tmp100, _tmp99)
tmp103 = tmp102 - tmp2
tmp104 = tl_math.exp(tmp103)
tmp105 = tmp104 / tmp5
tmp106 = tmp101 * tmp105
tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK])
tmp109 = _tmp108 + tmp107
_tmp108 = tl.where(rmask & xmask, tmp109, _tmp108)
tmp112 = tmp111 - tmp2
tmp113 = tl_math.exp(tmp112)
tmp114 = tmp113 / tmp5
tmp115 = tmp110 * tmp114
tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK])
tmp118 = _tmp117 + tmp116
_tmp117 = tl.where(rmask & xmask, tmp118, _tmp117)
tmp121 = tmp120 - tmp2
tmp122 = tl_math.exp(tmp121)
tmp123 = tmp122 / tmp5
tmp124 = tmp119 * tmp123
tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK])
tmp127 = _tmp126 + tmp125
_tmp126 = tl.where(rmask & xmask, tmp127, _tmp126)
tmp130 = tmp129 - tmp2
tmp131 = tl_math.exp(tmp130)
tmp132 = tmp131 / tmp5
tmp133 = tmp128 * tmp132
tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
tmp136 = _tmp135 + tmp134
_tmp135 = tl.where(rmask & xmask, tmp136, _tmp135)
tmp139 = tmp138 - tmp2
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp140 / tmp5
tmp142 = tmp137 * tmp141
tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK])
tmp145 = _tmp144 + tmp143
_tmp144 = tl.where(rmask & xmask, tmp145, _tmp144)
tmp148 = tmp147 - tmp2
tmp149 = tl_math.exp(tmp148)
tmp150 = tmp149 / tmp5
tmp151 = tmp146 * tmp150
tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK])
tmp154 = _tmp153 + tmp152
_tmp153 = tl.where(rmask & xmask, tmp154, _tmp153)
tmp157 = tmp156 - tmp2
tmp158 = tl_math.exp(tmp157)
tmp159 = tmp158 / tmp5
tmp160 = tmp155 * tmp159
tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK])
tmp163 = _tmp162 + tmp161
_tmp162 = tl.where(rmask & xmask, tmp163, _tmp162)
tmp166 = tmp165 - tmp2
tmp167 = tl_math.exp(tmp166)
tmp168 = tmp167 / tmp5
tmp169 = tmp164 * tmp168
tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK])
tmp172 = _tmp171 + tmp170
_tmp171 = tl.where(rmask & xmask, tmp172, _tmp171)
tmp175 = tmp174 - tmp2
tmp176 = tl_math.exp(tmp175)
tmp177 = tmp176 / tmp5
tmp178 = tmp173 * tmp177
tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK])
tmp181 = _tmp180 + tmp179
_tmp180 = tl.where(rmask & xmask, tmp181, _tmp180)
tmp184 = tmp183 - tmp2
tmp185 = tl_math.exp(tmp184)
tmp186 = tmp185 / tmp5
tmp187 = tmp182 * tmp186
tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
tmp190 = _tmp189 + tmp188
_tmp189 = tl.where(rmask & xmask, tmp190, _tmp189)
tmp193 = tmp192 - tmp2
tmp194 = tl_math.exp(tmp193)
tmp195 = tmp194 / tmp5
tmp196 = tmp191 * tmp195
tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK])
tmp199 = _tmp198 + tmp197
_tmp198 = tl.where(rmask & xmask, tmp199, _tmp198)
tmp202 = tmp201 - tmp2
tmp203 = tl_math.exp(tmp202)
tmp204 = tmp203 / tmp5
tmp205 = tmp200 * tmp204
tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK])
tmp208 = _tmp207 + tmp206
_tmp207 = tl.where(rmask & xmask, tmp208, _tmp207)
tmp211 = tmp210 - tmp2
tmp212 = tl_math.exp(tmp211)
tmp213 = tmp212 / tmp5
tmp214 = tmp209 * tmp213
tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK])
tmp217 = _tmp216 + tmp215
_tmp216 = tl.where(rmask & xmask, tmp217, _tmp216)
tmp220 = tmp219 - tmp2
tmp221 = tl_math.exp(tmp220)
tmp222 = tmp221 / tmp5
tmp223 = tmp218 * tmp222
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp226 = _tmp225 + tmp224
_tmp225 = tl.where(rmask & xmask, tmp226, _tmp225)
tmp229 = tmp228 - tmp2
tmp230 = tl_math.exp(tmp229)
tmp231 = tmp230 / tmp5
tmp232 = tmp227 * tmp231
tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK])
tmp235 = _tmp234 + tmp233
_tmp234 = tl.where(rmask & xmask, tmp235, _tmp234)
tmp238 = tmp237 - tmp2
tmp239 = tl_math.exp(tmp238)
tmp240 = tmp239 / tmp5
tmp241 = tmp236 * tmp240
tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK])
tmp244 = _tmp243 + tmp242
_tmp243 = tl.where(rmask & xmask, tmp244, _tmp243)
tmp247 = tmp246 - tmp2
tmp248 = tl_math.exp(tmp247)
tmp249 = tmp248 / tmp5
tmp250 = tmp245 * tmp249
tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK])
tmp253 = _tmp252 + tmp251
_tmp252 = tl.where(rmask & xmask, tmp253, _tmp252)
tmp9 = tl.sum(_tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp9, xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tl.store(out_ptr1 + x3, tmp18, xmask)
tmp27 = tl.sum(_tmp27, 1)[:, None]
tl.store(out_ptr2 + x3, tmp27, xmask)
tmp36 = tl.sum(_tmp36, 1)[:, None]
tl.store(out_ptr3 + x3, tmp36, xmask)
tmp45 = tl.sum(_tmp45, 1)[:, None]
tl.store(out_ptr4 + x3, tmp45, xmask)
tmp54 = tl.sum(_tmp54, 1)[:, None]
tl.store(out_ptr5 + x3, tmp54, xmask)
tmp63 = tl.sum(_tmp63, 1)[:, None]
tl.store(out_ptr6 + x3, tmp63, xmask)
tmp72 = tl.sum(_tmp72, 1)[:, None]
tl.store(out_ptr7 + x3, tmp72, xmask)
tmp81 = tl.sum(_tmp81, 1)[:, None]
tl.store(out_ptr8 + x3, tmp81, xmask)
tmp90 = tl.sum(_tmp90, 1)[:, None]
tl.store(out_ptr9 + x3, tmp90, xmask)
tmp99 = tl.sum(_tmp99, 1)[:, None]
tl.store(out_ptr10 + x3, tmp99, xmask)
tmp108 = tl.sum(_tmp108, 1)[:, None]
tl.store(out_ptr11 + x3, tmp108, xmask)
tmp117 = tl.sum(_tmp117, 1)[:, None]
tl.store(out_ptr12 + x3, tmp117, xmask)
tmp126 = tl.sum(_tmp126, 1)[:, None]
tl.store(out_ptr13 + x3, tmp126, xmask)
tmp135 = tl.sum(_tmp135, 1)[:, None]
tl.store(out_ptr14 + x3, tmp135, xmask)
tmp144 = tl.sum(_tmp144, 1)[:, None]
tl.store(out_ptr15 + x3, tmp144, xmask)
tmp153 = tl.sum(_tmp153, 1)[:, None]
tl.store(out_ptr16 + x3, tmp153, xmask)
tmp162 = tl.sum(_tmp162, 1)[:, None]
tl.store(out_ptr17 + x3, tmp162, xmask)
tmp171 = tl.sum(_tmp171, 1)[:, None]
tl.store(out_ptr18 + x3, tmp171, xmask)
tmp180 = tl.sum(_tmp180, 1)[:, None]
tl.store(out_ptr19 + x3, tmp180, xmask)
tmp189 = tl.sum(_tmp189, 1)[:, None]
tl.store(out_ptr20 + x3, tmp189, xmask)
tmp198 = tl.sum(_tmp198, 1)[:, None]
tl.store(out_ptr21 + x3, tmp198, xmask)
tmp207 = tl.sum(_tmp207, 1)[:, None]
tl.store(out_ptr22 + x3, tmp207, xmask)
tmp216 = tl.sum(_tmp216, 1)[:, None]
tl.store(out_ptr23 + x3, tmp216, xmask)
tmp225 = tl.sum(_tmp225, 1)[:, None]
tl.store(out_ptr24 + x3, tmp225, xmask)
tmp234 = tl.sum(_tmp234, 1)[:, None]
tl.store(out_ptr25 + x3, tmp234, xmask)
tmp243 = tl.sum(_tmp243, 1)[:, None]
tl.store(out_ptr26 + x3, tmp243, xmask)
tmp252 = tl.sum(_tmp252, 1)[:, None]
tl.store(out_ptr27 + x3, tmp252, xmask)
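# Same aggregation as above for the remaining clusters 57-63 (logit offsets
# 233472..258048 = 57*4096..63*4096); seven 128-dim weighted sums are produced.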
@triton.jit
def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x1 = xindex // 128
_tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + (233472 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp12 = tl.load(in_ptr1 + (237568 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp21 = tl.load(in_ptr1 + (241664 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp30 = tl.load(in_ptr1 + (245760 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp39 = tl.load(in_ptr1 + (249856 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp48 = tl.load(in_ptr1 + (253952 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp57 = tl.load(in_ptr1 + (258048 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = _tmp9 + tmp8
_tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
tmp13 = tmp12 - tmp2
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tmp22 = tmp21 - tmp2
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp25 = tmp20 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = _tmp27 + tmp26
_tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
tmp31 = tmp30 - tmp2
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp34 = tmp29 * tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = _tmp36 + tmp35
_tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
tmp40 = tmp39 - tmp2
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp41 / tmp5
tmp43 = tmp38 * tmp42
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = _tmp45 + tmp44
_tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
tmp49 = tmp48 - tmp2
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp5
tmp52 = tmp47 * tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = _tmp54 + tmp53
_tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
tmp58 = tmp57 - tmp2
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp59 / tmp5
tmp61 = tmp56 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = _tmp63 + tmp62
_tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
tmp9 = tl.sum(_tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp9, xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tl.store(out_ptr1 + x3, tmp18, xmask)
tmp27 = tl.sum(_tmp27, 1)[:, None]
tl.store(out_ptr2 + x3, tmp27, xmask)
tmp36 = tl.sum(_tmp36, 1)[:, None]
tl.store(out_ptr3 + x3, tmp36, xmask)
tmp45 = tl.sum(_tmp45, 1)[:, None]
tl.store(out_ptr4 + x3, tmp45, xmask)
tmp54 = tl.sum(_tmp54, 1)[:, None]
tl.store(out_ptr5 + x3, tmp54, xmask)
tmp63 = tl.sum(_tmp63, 1)[:, None]
tl.store(out_ptr6 + x3, tmp63, xmask)
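# The kernel below assembles the full (batch, 64, 128) VLAD matrix: x0 indexes
# the cluster slot, and the chain of range tests + tl.where copies the
# matching per-cluster sum (one in_ptr per cluster). It also reduces each
# 128-dim row to its L2 norm (in_out_ptr1) for the intra-normalization step.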
@triton.jit
def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12,
in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19,
in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26,
in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33,
in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40,
in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47,
in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54,
in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61,
in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex % 64
r2 = rindex
x1 = xindex // 64
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1, 1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (r2 + 128 * x1), tmp5 & xmask, eviction_policy
='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 3, tl.int64)
tmp8 = tmp0 >= tmp7
tmp9 = tmp0 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tl.load(in_ptr1 + (r2 + 128 * x1), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.full([1, 1], 2, tl.int64)
tmp13 = tmp0 >= tmp12
tmp14 = tmp0 < tmp7
tmp15 = tmp13 & tmp14
tmp16 = tl.load(in_ptr2 + (r2 + 128 * x1), tmp15 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.full([1, 1], 1, tl.int64)
tmp18 = tmp0 >= tmp17
tmp19 = tmp0 < tmp12
tmp20 = tmp18 & tmp19
tmp21 = tl.load(in_ptr3 + (r2 + 128 * x1), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tmp0 < tmp17
tmp23 = tl.load(in_ptr4 + (r2 + 128 * x1), tmp22 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = 0.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tl.where(tmp20, tmp21, tmp25)
tmp27 = tl.where(tmp15, tmp16, tmp26)
tmp28 = tl.where(tmp10, tmp11, tmp27)
tmp29 = tl.where(tmp5, tmp6, tmp28)
tmp30 = tl.full([1, 1], 8, tl.int64)
tmp31 = tmp0 >= tmp30
tmp32 = tl.full([1, 1], 9, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr5 + (r2 + 128 * x1), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.full([1, 1], 7, tl.int64)
tmp37 = tmp0 >= tmp36
tmp38 = tmp0 < tmp30
tmp39 = tmp37 & tmp38
tmp40 = tl.load(in_ptr6 + (r2 + 128 * x1), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tl.full([1, 1], 6, tl.int64)
tmp42 = tmp0 >= tmp41
tmp43 = tmp0 < tmp36
tmp44 = tmp42 & tmp43
tmp45 = tl.load(in_ptr7 + (r2 + 128 * x1), tmp44 & xmask,
eviction_policy='evict_last', other=0.0)
tmp46 = tmp0 >= tmp3
tmp47 = tmp0 < tmp41
tmp48 = tmp46 & tmp47
tmp49 = tl.load(in_ptr8 + (r2 + 128 * x1), tmp48 & xmask,
eviction_policy='evict_last', other=0.0)
tmp50 = tl.where(tmp48, tmp49, tmp29)
tmp51 = tl.where(tmp44, tmp45, tmp50)
tmp52 = tl.where(tmp39, tmp40, tmp51)
tmp53 = tl.where(tmp34, tmp35, tmp52)
tmp54 = tl.full([1, 1], 12, tl.int64)
tmp55 = tmp0 >= tmp54
tmp56 = tl.full([1, 1], 13, tl.int64)
tmp57 = tmp0 < tmp56
tmp58 = tmp55 & tmp57
tmp59 = tl.load(in_ptr9 + (r2 + 128 * x1), tmp58 & xmask,
eviction_policy='evict_last', other=0.0)
tmp60 = tl.full([1, 1], 11, tl.int64)
tmp61 = tmp0 >= tmp60
tmp62 = tmp0 < tmp54
tmp63 = tmp61 & tmp62
tmp64 = tl.load(in_ptr10 + (r2 + 128 * x1), tmp63 & xmask,
eviction_policy='evict_last', other=0.0)
tmp65 = tl.full([1, 1], 10, tl.int64)
tmp66 = tmp0 >= tmp65
tmp67 = tmp0 < tmp60
tmp68 = tmp66 & tmp67
tmp69 = tl.load(in_ptr11 + (r2 + 128 * x1), tmp68 & xmask,
eviction_policy='evict_last', other=0.0)
tmp70 = tmp0 >= tmp32
tmp71 = tmp0 < tmp65
tmp72 = tmp70 & tmp71
tmp73 = tl.load(in_ptr12 + (r2 + 128 * x1), tmp72 & xmask,
eviction_policy='evict_last', other=0.0)
tmp74 = tl.where(tmp72, tmp73, tmp53)
tmp75 = tl.where(tmp68, tmp69, tmp74)
tmp76 = tl.where(tmp63, tmp64, tmp75)
tmp77 = tl.where(tmp58, tmp59, tmp76)
tmp78 = tl.full([1, 1], 16, tl.int64)
tmp79 = tmp0 >= tmp78
tmp80 = tl.full([1, 1], 17, tl.int64)
tmp81 = tmp0 < tmp80
tmp82 = tmp79 & tmp81
tmp83 = tl.load(in_ptr13 + (r2 + 128 * x1), tmp82 & xmask,
eviction_policy='evict_last', other=0.0)
tmp84 = tl.full([1, 1], 15, tl.int64)
tmp85 = tmp0 >= tmp84
tmp86 = tmp0 < tmp78
tmp87 = tmp85 & tmp86
tmp88 = tl.load(in_ptr14 + (r2 + 128 * x1), tmp87 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tl.full([1, 1], 14, tl.int64)
tmp90 = tmp0 >= tmp89
tmp91 = tmp0 < tmp84
tmp92 = tmp90 & tmp91
tmp93 = tl.load(in_ptr15 + (r2 + 128 * x1), tmp92 & xmask,
eviction_policy='evict_last', other=0.0)
tmp94 = tmp0 >= tmp56
tmp95 = tmp0 < tmp89
tmp96 = tmp94 & tmp95
tmp97 = tl.load(in_ptr16 + (r2 + 128 * x1), tmp96 & xmask,
eviction_policy='evict_last', other=0.0)
tmp98 = tl.where(tmp96, tmp97, tmp77)
tmp99 = tl.where(tmp92, tmp93, tmp98)
tmp100 = tl.where(tmp87, tmp88, tmp99)
tmp101 = tl.where(tmp82, tmp83, tmp100)
tmp102 = tl.full([1, 1], 20, tl.int64)
tmp103 = tmp0 >= tmp102
tmp104 = tl.full([1, 1], 21, tl.int64)
tmp105 = tmp0 < tmp104
tmp106 = tmp103 & tmp105
tmp107 = tl.load(in_ptr17 + (r2 + 128 * x1), tmp106 & xmask,
eviction_policy='evict_last', other=0.0)
tmp108 = tl.full([1, 1], 19, tl.int64)
tmp109 = tmp0 >= tmp108
tmp110 = tmp0 < tmp102
tmp111 = tmp109 & tmp110
tmp112 = tl.load(in_ptr18 + (r2 + 128 * x1), tmp111 & xmask,
eviction_policy='evict_last', other=0.0)
tmp113 = tl.full([1, 1], 18, tl.int64)
tmp114 = tmp0 >= tmp113
tmp115 = tmp0 < tmp108
tmp116 = tmp114 & tmp115
tmp117 = tl.load(in_ptr19 + (r2 + 128 * x1), tmp116 & xmask,
eviction_policy='evict_last', other=0.0)
tmp118 = tmp0 >= tmp80
tmp119 = tmp0 < tmp113
tmp120 = tmp118 & tmp119
tmp121 = tl.load(in_ptr20 + (r2 + 128 * x1), tmp120 & xmask,
eviction_policy='evict_last', other=0.0)
tmp122 = tl.where(tmp120, tmp121, tmp101)
tmp123 = tl.where(tmp116, tmp117, tmp122)
tmp124 = tl.where(tmp111, tmp112, tmp123)
tmp125 = tl.where(tmp106, tmp107, tmp124)
tmp126 = tl.full([1, 1], 24, tl.int64)
tmp127 = tmp0 >= tmp126
tmp128 = tl.full([1, 1], 25, tl.int64)
tmp129 = tmp0 < tmp128
tmp130 = tmp127 & tmp129
tmp131 = tl.load(in_ptr21 + (r2 + 128 * x1), tmp130 & xmask,
eviction_policy='evict_last', other=0.0)
tmp132 = tl.full([1, 1], 23, tl.int64)
tmp133 = tmp0 >= tmp132
tmp134 = tmp0 < tmp126
tmp135 = tmp133 & tmp134
tmp136 = tl.load(in_ptr22 + (r2 + 128 * x1), tmp135 & xmask,
eviction_policy='evict_last', other=0.0)
tmp137 = tl.full([1, 1], 22, tl.int64)
tmp138 = tmp0 >= tmp137
tmp139 = tmp0 < tmp132
tmp140 = tmp138 & tmp139
tmp141 = tl.load(in_ptr23 + (r2 + 128 * x1), tmp140 & xmask,
eviction_policy='evict_last', other=0.0)
tmp142 = tmp0 >= tmp104
tmp143 = tmp0 < tmp137
tmp144 = tmp142 & tmp143
tmp145 = tl.load(in_ptr24 + (r2 + 128 * x1), tmp144 & xmask,
eviction_policy='evict_last', other=0.0)
tmp146 = tl.where(tmp144, tmp145, tmp125)
tmp147 = tl.where(tmp140, tmp141, tmp146)
tmp148 = tl.where(tmp135, tmp136, tmp147)
tmp149 = tl.where(tmp130, tmp131, tmp148)
tmp150 = tl.full([1, 1], 28, tl.int64)
tmp151 = tmp0 >= tmp150
tmp152 = tl.full([1, 1], 29, tl.int64)
tmp153 = tmp0 < tmp152
tmp154 = tmp151 & tmp153
tmp155 = tl.load(in_ptr25 + (r2 + 128 * x1), tmp154 & xmask,
eviction_policy='evict_last', other=0.0)
tmp156 = tl.full([1, 1], 27, tl.int64)
tmp157 = tmp0 >= tmp156
tmp158 = tmp0 < tmp150
tmp159 = tmp157 & tmp158
tmp160 = tl.load(in_ptr26 + (r2 + 128 * x1), tmp159 & xmask,
eviction_policy='evict_last', other=0.0)
tmp161 = tl.full([1, 1], 26, tl.int64)
tmp162 = tmp0 >= tmp161
tmp163 = tmp0 < tmp156
tmp164 = tmp162 & tmp163
tmp165 = tl.load(in_ptr27 + (r2 + 128 * x1), tmp164 & xmask,
eviction_policy='evict_last', other=0.0)
tmp166 = tmp0 >= tmp128
tmp167 = tmp0 < tmp161
tmp168 = tmp166 & tmp167
tmp169 = tl.load(in_ptr28 + (r2 + 128 * x1), tmp168 & xmask,
eviction_policy='evict_last', other=0.0)
tmp170 = tl.where(tmp168, tmp169, tmp149)
tmp171 = tl.where(tmp164, tmp165, tmp170)
tmp172 = tl.where(tmp159, tmp160, tmp171)
tmp173 = tl.where(tmp154, tmp155, tmp172)
tmp174 = tl.full([1, 1], 32, tl.int64)
tmp175 = tmp0 >= tmp174
tmp176 = tl.full([1, 1], 33, tl.int64)
tmp177 = tmp0 < tmp176
tmp178 = tmp175 & tmp177
tmp179 = tl.load(in_ptr29 + (r2 + 128 * x1), tmp178 & xmask,
eviction_policy='evict_last', other=0.0)
tmp180 = tl.full([1, 1], 31, tl.int64)
tmp181 = tmp0 >= tmp180
tmp182 = tmp0 < tmp174
tmp183 = tmp181 & tmp182
tmp184 = tl.load(in_ptr30 + (r2 + 128 * x1), tmp183 & xmask,
eviction_policy='evict_last', other=0.0)
tmp185 = tl.full([1, 1], 30, tl.int64)
tmp186 = tmp0 >= tmp185
tmp187 = tmp0 < tmp180
tmp188 = tmp186 & tmp187
tmp189 = tl.load(in_ptr31 + (r2 + 128 * x1), tmp188 & xmask,
eviction_policy='evict_last', other=0.0)
tmp190 = tmp0 >= tmp152
tmp191 = tmp0 < tmp185
tmp192 = tmp190 & tmp191
tmp193 = tl.load(in_ptr32 + (r2 + 128 * x1), tmp192 & xmask,
eviction_policy='evict_last', other=0.0)
tmp194 = tl.where(tmp192, tmp193, tmp173)
tmp195 = tl.where(tmp188, tmp189, tmp194)
tmp196 = tl.where(tmp183, tmp184, tmp195)
tmp197 = tl.where(tmp178, tmp179, tmp196)
tmp198 = tl.full([1, 1], 36, tl.int64)
tmp199 = tmp0 >= tmp198
tmp200 = tl.full([1, 1], 37, tl.int64)
tmp201 = tmp0 < tmp200
tmp202 = tmp199 & tmp201
tmp203 = tl.load(in_ptr33 + (r2 + 128 * x1), tmp202 & xmask,
eviction_policy='evict_last', other=0.0)
tmp204 = tl.full([1, 1], 35, tl.int64)
tmp205 = tmp0 >= tmp204
tmp206 = tmp0 < tmp198
tmp207 = tmp205 & tmp206
tmp208 = tl.load(in_ptr34 + (r2 + 128 * x1), tmp207 & xmask,
eviction_policy='evict_last', other=0.0)
tmp209 = tl.full([1, 1], 34, tl.int64)
tmp210 = tmp0 >= tmp209
tmp211 = tmp0 < tmp204
tmp212 = tmp210 & tmp211
tmp213 = tl.load(in_ptr35 + (r2 + 128 * x1), tmp212 & xmask,
eviction_policy='evict_last', other=0.0)
tmp214 = tmp0 >= tmp176
tmp215 = tmp0 < tmp209
tmp216 = tmp214 & tmp215
tmp217 = tl.load(in_ptr36 + (r2 + 128 * x1), tmp216 & xmask,
eviction_policy='evict_last', other=0.0)
tmp218 = tl.where(tmp216, tmp217, tmp197)
tmp219 = tl.where(tmp212, tmp213, tmp218)
tmp220 = tl.where(tmp207, tmp208, tmp219)
tmp221 = tl.where(tmp202, tmp203, tmp220)
tmp222 = tl.full([1, 1], 40, tl.int64)
tmp223 = tmp0 >= tmp222
tmp224 = tl.full([1, 1], 41, tl.int64)
tmp225 = tmp0 < tmp224
tmp226 = tmp223 & tmp225
tmp227 = tl.load(in_ptr37 + (r2 + 128 * x1), tmp226 & xmask,
eviction_policy='evict_last', other=0.0)
tmp228 = tl.full([1, 1], 39, tl.int64)
tmp229 = tmp0 >= tmp228
tmp230 = tmp0 < tmp222
tmp231 = tmp229 & tmp230
tmp232 = tl.load(in_ptr38 + (r2 + 128 * x1), tmp231 & xmask,
eviction_policy='evict_last', other=0.0)
tmp233 = tl.full([1, 1], 38, tl.int64)
tmp234 = tmp0 >= tmp233
tmp235 = tmp0 < tmp228
tmp236 = tmp234 & tmp235
tmp237 = tl.load(in_ptr39 + (r2 + 128 * x1), tmp236 & xmask,
eviction_policy='evict_last', other=0.0)
tmp238 = tmp0 >= tmp200
tmp239 = tmp0 < tmp233
tmp240 = tmp238 & tmp239
tmp241 = tl.load(in_ptr40 + (r2 + 128 * x1), tmp240 & xmask,
eviction_policy='evict_last', other=0.0)
tmp242 = tl.where(tmp240, tmp241, tmp221)
tmp243 = tl.where(tmp236, tmp237, tmp242)
tmp244 = tl.where(tmp231, tmp232, tmp243)
tmp245 = tl.where(tmp226, tmp227, tmp244)
tmp246 = tl.full([1, 1], 44, tl.int64)
tmp247 = tmp0 >= tmp246
tmp248 = tl.full([1, 1], 45, tl.int64)
tmp249 = tmp0 < tmp248
tmp250 = tmp247 & tmp249
tmp251 = tl.load(in_ptr41 + (r2 + 128 * x1), tmp250 & xmask,
eviction_policy='evict_last', other=0.0)
tmp252 = tl.full([1, 1], 43, tl.int64)
tmp253 = tmp0 >= tmp252
tmp254 = tmp0 < tmp246
tmp255 = tmp253 & tmp254
tmp256 = tl.load(in_ptr42 + (r2 + 128 * x1), tmp255 & xmask,
eviction_policy='evict_last', other=0.0)
tmp257 = tl.full([1, 1], 42, tl.int64)
tmp258 = tmp0 >= tmp257
tmp259 = tmp0 < tmp252
tmp260 = tmp258 & tmp259
tmp261 = tl.load(in_ptr43 + (r2 + 128 * x1), tmp260 & xmask,
eviction_policy='evict_last', other=0.0)
tmp262 = tmp0 >= tmp224
tmp263 = tmp0 < tmp257
tmp264 = tmp262 & tmp263
tmp265 = tl.load(in_ptr44 + (r2 + 128 * x1), tmp264 & xmask,
eviction_policy='evict_last', other=0.0)
tmp266 = tl.where(tmp264, tmp265, tmp245)
tmp267 = tl.where(tmp260, tmp261, tmp266)
tmp268 = tl.where(tmp255, tmp256, tmp267)
tmp269 = tl.where(tmp250, tmp251, tmp268)
tmp270 = tl.full([1, 1], 48, tl.int64)
tmp271 = tmp0 >= tmp270
tmp272 = tl.full([1, 1], 49, tl.int64)
tmp273 = tmp0 < tmp272
tmp274 = tmp271 & tmp273
tmp275 = tl.load(in_ptr45 + (r2 + 128 * x1), tmp274 & xmask,
eviction_policy='evict_last', other=0.0)
tmp276 = tl.full([1, 1], 47, tl.int64)
tmp277 = tmp0 >= tmp276
tmp278 = tmp0 < tmp270
tmp279 = tmp277 & tmp278
tmp280 = tl.load(in_ptr46 + (r2 + 128 * x1), tmp279 & xmask,
eviction_policy='evict_last', other=0.0)
tmp281 = tl.full([1, 1], 46, tl.int64)
tmp282 = tmp0 >= tmp281
tmp283 = tmp0 < tmp276
tmp284 = tmp282 & tmp283
tmp285 = tl.load(in_ptr47 + (r2 + 128 * x1), tmp284 & xmask,
eviction_policy='evict_last', other=0.0)
tmp286 = tmp0 >= tmp248
tmp287 = tmp0 < tmp281
tmp288 = tmp286 & tmp287
tmp289 = tl.load(in_ptr48 + (r2 + 128 * x1), tmp288 & xmask,
eviction_policy='evict_last', other=0.0)
tmp290 = tl.where(tmp288, tmp289, tmp269)
tmp291 = tl.where(tmp284, tmp285, tmp290)
tmp292 = tl.where(tmp279, tmp280, tmp291)
tmp293 = tl.where(tmp274, tmp275, tmp292)
tmp294 = tl.full([1, 1], 52, tl.int64)
tmp295 = tmp0 >= tmp294
tmp296 = tl.full([1, 1], 53, tl.int64)
tmp297 = tmp0 < tmp296
tmp298 = tmp295 & tmp297
tmp299 = tl.load(in_ptr49 + (r2 + 128 * x1), tmp298 & xmask,
eviction_policy='evict_last', other=0.0)
tmp300 = tl.full([1, 1], 51, tl.int64)
tmp301 = tmp0 >= tmp300
tmp302 = tmp0 < tmp294
tmp303 = tmp301 & tmp302
tmp304 = tl.load(in_ptr50 + (r2 + 128 * x1), tmp303 & xmask,
eviction_policy='evict_last', other=0.0)
tmp305 = tl.full([1, 1], 50, tl.int64)
tmp306 = tmp0 >= tmp305
tmp307 = tmp0 < tmp300
tmp308 = tmp306 & tmp307
tmp309 = tl.load(in_ptr51 + (r2 + 128 * x1), tmp308 & xmask,
eviction_policy='evict_last', other=0.0)
tmp310 = tmp0 >= tmp272
tmp311 = tmp0 < tmp305
tmp312 = tmp310 & tmp311
tmp313 = tl.load(in_ptr52 + (r2 + 128 * x1), tmp312 & xmask,
eviction_policy='evict_last', other=0.0)
tmp314 = tl.where(tmp312, tmp313, tmp293)
tmp315 = tl.where(tmp308, tmp309, tmp314)
tmp316 = tl.where(tmp303, tmp304, tmp315)
tmp317 = tl.where(tmp298, tmp299, tmp316)
tmp318 = tl.full([1, 1], 56, tl.int64)
tmp319 = tmp0 >= tmp318
tmp320 = tl.full([1, 1], 57, tl.int64)
tmp321 = tmp0 < tmp320
tmp322 = tmp319 & tmp321
tmp323 = tl.load(in_ptr53 + (r2 + 128 * x1), tmp322 & xmask,
eviction_policy='evict_last', other=0.0)
tmp324 = tl.full([1, 1], 55, tl.int64)
tmp325 = tmp0 >= tmp324
tmp326 = tmp0 < tmp318
tmp327 = tmp325 & tmp326
tmp328 = tl.load(in_ptr54 + (r2 + 128 * x1), tmp327 & xmask,
eviction_policy='evict_last', other=0.0)
tmp329 = tl.full([1, 1], 54, tl.int64)
tmp330 = tmp0 >= tmp329
tmp331 = tmp0 < tmp324
tmp332 = tmp330 & tmp331
tmp333 = tl.load(in_ptr55 + (r2 + 128 * x1), tmp332 & xmask,
eviction_policy='evict_last', other=0.0)
tmp334 = tmp0 >= tmp296
tmp335 = tmp0 < tmp329
tmp336 = tmp334 & tmp335
tmp337 = tl.load(in_ptr56 + (r2 + 128 * x1), tmp336 & xmask,
eviction_policy='evict_last', other=0.0)
tmp338 = tl.where(tmp336, tmp337, tmp317)
tmp339 = tl.where(tmp332, tmp333, tmp338)
tmp340 = tl.where(tmp327, tmp328, tmp339)
tmp341 = tl.where(tmp322, tmp323, tmp340)
tmp342 = tl.full([1, 1], 60, tl.int64)
tmp343 = tmp0 >= tmp342
tmp344 = tl.full([1, 1], 61, tl.int64)
tmp345 = tmp0 < tmp344
tmp346 = tmp343 & tmp345
tmp347 = tl.load(in_ptr57 + (r2 + 128 * x1), tmp346 & xmask,
eviction_policy='evict_last', other=0.0)
tmp348 = tl.full([1, 1], 59, tl.int64)
tmp349 = tmp0 >= tmp348
tmp350 = tmp0 < tmp342
tmp351 = tmp349 & tmp350
tmp352 = tl.load(in_ptr58 + (r2 + 128 * x1), tmp351 & xmask,
eviction_policy='evict_last', other=0.0)
tmp353 = tl.full([1, 1], 58, tl.int64)
tmp354 = tmp0 >= tmp353
tmp355 = tmp0 < tmp348
tmp356 = tmp354 & tmp355
tmp357 = tl.load(in_ptr59 + (r2 + 128 * x1), tmp356 & xmask,
eviction_policy='evict_last', other=0.0)
tmp358 = tmp0 >= tmp320
tmp359 = tmp0 < tmp353
tmp360 = tmp358 & tmp359
tmp361 = tl.load(in_ptr60 + (r2 + 128 * x1), tmp360 & xmask,
eviction_policy='evict_last', other=0.0)
tmp362 = tl.where(tmp360, tmp361, tmp341)
tmp363 = tl.where(tmp356, tmp357, tmp362)
tmp364 = tl.where(tmp351, tmp352, tmp363)
tmp365 = tl.where(tmp346, tmp347, tmp364)
tmp366 = tl.full([1, 1], 63, tl.int64)
tmp367 = tmp0 >= tmp366
tmp368 = tl.load(in_ptr61 + (r2 + 128 * x1), tmp367 & xmask,
eviction_policy='evict_last', other=0.0)
tmp369 = tl.full([1, 1], 62, tl.int64)
tmp370 = tmp0 >= tmp369
tmp371 = tmp0 < tmp366
tmp372 = tmp370 & tmp371
tmp373 = tl.load(in_ptr62 + (r2 + 128 * x1), tmp372 & xmask,
eviction_policy='evict_last', other=0.0)
tmp374 = tmp0 >= tmp344
tmp375 = tmp0 < tmp369
tmp376 = tmp374 & tmp375
tmp377 = tl.load(in_ptr63 + (r2 + 128 * x1), tmp376 & xmask,
eviction_policy='evict_last', other=0.0)
tmp378 = tl.where(tmp376, tmp377, tmp365)
tmp379 = tl.where(tmp372, tmp373, tmp378)
tmp380 = tl.where(tmp367, tmp368, tmp379)
tmp381 = tmp380 * tmp380
tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
tmp384 = tl.where(xmask, tmp382, 0)
tmp385 = tl.sum(tmp384, 1)[:, None]
tmp386 = libdevice.sqrt(tmp385)
tl.store(in_out_ptr0 + (r2 + 128 * x3), tmp380, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp386, xmask)
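# Final normalization: the first pass divides each cluster row by
# max(row_norm, 1e-12) (intra-normalization) while accumulating squares for
# the global L2 norm of the flattened 8192-dim descriptor; the second pass
# re-reads the data and writes the fully normalized result.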
@triton.jit
def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = 1e-12
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 / tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = _tmp7 + tmp6
_tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
tmp7 = tl.sum(_tmp7, 1)[:, None]
tmp9 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = 1e-12
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp10 / tmp13
tmp15 = triton_helpers.maximum(tmp9, tmp12)
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + (r1 + 8192 * x0), tmp16, rmask & xmask)
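# call() wires the kernels together: input L2 stats, centroid subtraction
# into per-cluster residual buffers, a 1x1 convolution for assignment logits,
# softmax statistics, the weighted-residual reductions above, VLAD assembly
# with intra-normalization, and a final global L2 normalization yielding a
# (4, 8192) descriptor (see the buffer shapes and asserts below).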
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_3, (64, 128), (128, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
get_raw_stream(0)
triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0,
16384, 128, XBLOCK=64, RBLOCK=4, num_warps=8, num_stages=1)
buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf6 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096,
1), torch.float32)
buf8 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096,
1), torch.float32)
buf10 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf12 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf17 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf19 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf21 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf24 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf28 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf30 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf33 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf35 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf37 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf39 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf42 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf44 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf46 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf48 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf51 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf53 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf55 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf57 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf60 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf62 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf64 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf66 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf69 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf71 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf73 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf75 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf78 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf80 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf82 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf84 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf87 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf89 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf91 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf93 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf96 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf98 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf100 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf102 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf105 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf107 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf109 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf111 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf114 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf116 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf118 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf120 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf123 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf125 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf127 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf129 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf132 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf134 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf136 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf138 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf141 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf143 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf145 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
triton_poi_fused_div_sub_1[grid(2097152)](primals_1, buf0,
primals_3, buf1, buf6, buf8, buf10, buf12, buf15, buf17, buf19,
buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39,
buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60,
buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80,
buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100,
buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118,
buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136,
buf138, buf141, buf143, buf145, 2097152, XBLOCK=512, num_warps=
8, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32)
triton_per_fused__softmax_2[grid(16384)](buf2, buf3, buf4, 16384,
64, XBLOCK=8, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf13 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf18 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf25 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf27 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf29 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf31 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf34 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf36 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf38 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf40 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf43 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf45 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf47 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf49 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf52 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf54 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf56 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf58 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf61 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf63 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf65 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf67 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
triton_red_fused_mul_sub_sum_3[grid(512)](buf1, primals_3, buf2,
buf3, buf4, buf6, buf8, buf10, buf12, buf15, buf17, buf19,
buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39,
buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60,
buf62, buf64, buf66, buf5, buf7, buf9, buf11, buf13, buf16,
buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36,
buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56,
buf58, buf61, buf63, buf65, buf67, 512, 4096, XBLOCK=1, RBLOCK=
2048, num_warps=16, num_stages=1)
buf70 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf72 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf74 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf76 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf79 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf81 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf83 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf85 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf88 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf90 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf92 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf94 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf97 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf99 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf101 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf103 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf106 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf108 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf110 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf112 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf115 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf117 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf119 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf121 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf124 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf126 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf128 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf130 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
triton_red_fused_mul_sum_4[grid(512)](buf69, buf2, buf3, buf4,
buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89,
buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107,
buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125,
buf127, buf129, buf70, buf72, buf74, buf76, buf79, buf81, buf83,
buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103,
buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121,
buf124, buf126, buf128, buf130, 512, 4096, XBLOCK=1, RBLOCK=
2048, num_warps=16, num_stages=1)
buf133 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf135 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf137 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf139 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf142 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf144 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf146 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
triton_red_fused_mul_sum_5[grid(512)](buf132, buf2, buf3, buf4,
buf134, buf136, buf138, buf141, buf143, buf145, buf133, buf135,
buf137, buf139, buf142, buf144, buf146, 512, 4096, XBLOCK=1,
RBLOCK=1024, num_warps=16, num_stages=1)
buf14 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32)
buf23 = buf14
del buf14
buf32 = buf23
del buf23
buf41 = buf32
del buf32
buf50 = buf41
del buf41
buf59 = buf50
del buf50
buf68 = buf59
del buf59
buf77 = buf68
del buf68
buf86 = buf77
del buf77
buf95 = buf86
del buf86
buf104 = buf95
del buf95
buf113 = buf104
del buf104
buf122 = buf113
del buf113
buf131 = buf122
del buf122
buf140 = buf131
del buf131
buf147 = buf140
del buf140
buf148 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32)
buf149 = reinterpret_tensor(buf148, (4, 64, 1), (64, 1, 1), 0)
del buf148
triton_per_fused_copy_linalg_vector_norm_zeros_6[grid(256)](buf147,
buf149, buf13, buf11, buf9, buf7, buf5, buf22, buf20, buf18,
buf16, buf31, buf29, buf27, buf25, buf40, buf38, buf36, buf34,
buf49, buf47, buf45, buf43, buf58, buf56, buf54, buf52, buf67,
buf65, buf63, buf61, buf76, buf74, buf72, buf70, buf85, buf83,
buf81, buf79, buf94, buf92, buf90, buf88, buf103, buf101, buf99,
buf97, buf112, buf110, buf108, buf106, buf121, buf119, buf117,
buf115, buf130, buf128, buf126, buf124, buf139, buf137, buf135,
buf133, buf146, buf144, buf142, 256, 128, XBLOCK=1, num_warps=2,
num_stages=1)
del buf101
del buf103
del buf106
del buf108
del buf11
del buf110
del buf112
del buf115
del buf117
del buf119
del buf121
del buf124
del buf126
del buf128
del buf13
del buf130
del buf133
del buf135
del buf137
del buf139
del buf142
del buf144
del buf146
del buf16
del buf18
del buf20
del buf22
del buf25
del buf27
del buf29
del buf31
del buf34
del buf36
del buf38
del buf40
del buf43
del buf45
del buf47
del buf49
del buf5
del buf52
del buf54
del buf56
del buf58
del buf61
del buf63
del buf65
del buf67
del buf7
del buf70
del buf72
del buf74
del buf76
del buf79
del buf81
del buf83
del buf85
del buf88
del buf9
del buf90
del buf92
del buf94
del buf97
del buf99
buf150 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf151 = reinterpret_tensor(buf150, (4, 1), (1, 1), 0)
del buf150
buf152 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32)
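        # Flattens to (4, 8192) and divides by the global L2 norm, yielding the
        # final VLAD descriptor in buf152.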
triton_red_fused_div_linalg_vector_norm_7[grid(4)](buf151, buf147,
buf149, buf152, 4, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
return (buf152, primals_2, buf1, buf2, buf3, buf4, reinterpret_tensor(
primals_3, (1, 128), (128, 1), 0), buf6, buf8, buf10, buf12, buf15,
buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35,
buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55,
buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75,
buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96,
buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114,
buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132,
buf134, buf136, buf138, buf141, buf143, buf145, buf147, buf149, buf151)
class NetVLADNew(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, num_clusters=64, dim=128, normalize_input=True,
vladv2=False):
"""
Args:
num_clusters : int
The number of clusters
dim : int
Dimension of descriptors
alpha : float
                Initialization parameter (set by init_params); a larger value gives a harder assignment.
            normalize_input : bool
                If true, descriptor-wise L2 normalization is applied to the input.
            vladv2 : bool
                If true, use vladv2; otherwise use vladv1.
"""
super(NetVLADNew, self).__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = 0
self.vladv2 = vladv2
self.normalize_input = normalize_input
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=
vladv2)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
def init_params(self, clsts, traindescs):
if self.vladv2 is False:
clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
dots = np.dot(clstsAssign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :]
self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha *
clstsAssign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
else:
knn = NearestNeighbors(n_jobs=-1)
knn.fit(traindescs)
del traindescs
dsSq = np.square(knn.kneighbors(clsts, 2)[1])
del knn
self.alpha = (-np.log(0.01) / np.mean(dsSq[:, 1] - dsSq[:, 0])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
del clsts, dsSq
self.conv.weight = nn.Parameter((2.0 * self.alpha * self.
centroids).unsqueeze(-1).unsqueeze(-1))
self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm
(dim=1))
def forward(self, input_0):
primals_3 = self.centroids
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
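# A hedged usage sketch (not part of the compiled module; the input spatial size
# is inferred from the 4096-element reductions in `call` above):
#   model = NetVLADNew(num_clusters=64, dim=128).cuda()
#   feats = torch.rand(4, 128, 64, 64, device='cuda')
#   vlad = model(feats)  # L2-normalized descriptor, shape (4, 64 * 128) = (4, 8192)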
| ByungHeeCha/visual_localization | NetVLAD | false | 17,431 | [
"BSD-3-Clause"
] | 3 | 787fb8f6ee5c6e69ece9e83a016d15596e5524bc | https://github.com/ByungHeeCha/visual_localization/tree/787fb8f6ee5c6e69ece9e83a016d15596e5524bc |
SELayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/yg/cygooswl5gkxugqq2ejgag2vtcqhtumn2j3notsgzty3xoxbrq4v.py
# Topologically Sorted Source Nodes: [inputs_squeezed], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# inputs_squeezed => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/uk/cuklchmela7xhbcshnibjleggfv3a4aivpmu37zsm7nthvihuy24.py
# Topologically Sorted Source Nodes: [conv2d, sigmoid, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# conv2d => convolution
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %convolution), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp4 * tmp3
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/x7/cx7evvvm7te22h7xf3yh7pnjatqie5vy54vyorfffrtctztd4wn5.py
# Topologically Sorted Source Nodes: [inputs_squeezed_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# inputs_squeezed_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vz/cvzlcxmuowtstgqrxzb5hcsechd32n3vjbzbpf457cjjvsojtkea.py
# Topologically Sorted Source Nodes: [sigmoid_1, mul_1], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# sigmoid_1 => sigmoid_1
# Graph fragment:
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid_1), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [inputs_squeezed], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, sigmoid, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
triton_poi_fused_convolution_mul_sigmoid_1.run(buf3, primals_3, buf4, 4, grid=grid(4), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [inputs_squeezed_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [inputs_squeezed_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_1, mul_1], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf6, buf7, 256, grid=grid(256), stream=stream0)
return (buf7, primals_1, primals_2, primals_4, buf1, buf3, buf4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.onnx
from torch.optim.lr_scheduler import *
def composite_swish(inputs_1, inputs_2):
return inputs_1 * torch.sigmoid(inputs_2)
def swish(x):
return torch.sigmoid(x) * x
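# Note: swish(x) = sigmoid(x) * x gates the squeezed activations, while
# composite_swish(a, b) = a * sigmoid(b) applies the final SE gate to the input.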
class _Conv2dSamePadding(nn.Conv2d):
""" Class implementing 2d adaptively padded convolutions """
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]
] * 2
def forward(self, x):
ih, iw = x.size()[-2:]
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
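        # TF-style "SAME" padding: pad so each output size is ceil(input / stride),
        # with any odd remainder going to the right/bottom side.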
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] +
1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] +
1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2])
return F.conv2d(x, self.weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
class SELayer(nn.Module):
"""
    Class implements the squeeze-and-excitation layer
"""
def __init__(self, in_channels, se_ratio):
super(SELayer, self).__init__()
assert se_ratio <= 1.0 and se_ratio >= 0.0, 'se_ratio should be in [0,1]'
num_squeezed_channels = max(1, int(in_channels * se_ratio))
self._se_reduce = _Conv2dSamePadding(in_channels,
num_squeezed_channels, 1)
self._se_expand = _Conv2dSamePadding(num_squeezed_channels,
in_channels, 1)
def forward(self, inputs):
inputs_squeezed = F.adaptive_avg_pool2d(inputs, 1)
inputs_squeezed = self._se_expand(swish(self._se_reduce(
inputs_squeezed)))
return composite_swish(inputs, inputs_squeezed)
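# A hedged usage sketch (shapes mirror get_inputs/get_init_inputs below):
#   layer = SELayer(in_channels=4, se_ratio=0.5)
#   out = layer(torch.rand(4, 4, 4, 4))  # SE gating preserves the input shape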
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'se_ratio': 0}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
from torch.nn import functional as F
import torch.onnx
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
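# Kernel 0: global average pool -- reduces the 16 spatial elements of each of the
# 16 (batch, channel) rows to their mean (the SE "squeeze" step).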
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
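# Kernel 1: epilogue of the reduce convolution -- adds the scalar bias in place,
# then applies swish (sigmoid(x) * x) and stores the gated result separately.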
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp4 * tmp3
tl.store(in_out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr0 + x0, tmp5, xmask)
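# Kernel 2: bias add for the expand convolution, broadcasting the 4-element bias
# over the (batch, channel) grid.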
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
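# Kernel 3: final SE gate -- out = input * sigmoid(excitation), with the
# per-channel excitation broadcast over the 16 spatial positions.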
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused_convolution_mul_sigmoid_1[grid(4)](buf3, primals_3,
buf4, 4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_3
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_2[grid(16)](buf6, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf6, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf7, primals_1, primals_2, primals_4, buf1, buf3, buf4, buf6
def composite_swish(inputs_1, inputs_2):
return inputs_1 * torch.sigmoid(inputs_2)
def swish(x):
return torch.sigmoid(x) * x
class _Conv2dSamePadding(nn.Conv2d):
""" Class implementing 2d adaptively padded convolutions """
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]
] * 2
def forward(self, x):
ih, iw = x.size()[-2:]
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
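        # TF-style "SAME" padding: pad so each output size is ceil(input / stride),
        # with any odd remainder going to the right/bottom side.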
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] +
1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] +
1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2])
return F.conv2d(x, self.weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
class SELayerNew(nn.Module):
"""
    Class implements the squeeze-and-excitation layer
"""
def __init__(self, in_channels, se_ratio):
super(SELayerNew, self).__init__()
assert se_ratio <= 1.0 and se_ratio >= 0.0, 'se_ratio should be in [0,1]'
num_squeezed_channels = max(1, int(in_channels * se_ratio))
self._se_reduce = _Conv2dSamePadding(in_channels,
num_squeezed_channels, 1)
self._se_expand = _Conv2dSamePadding(num_squeezed_channels,
in_channels, 1)
def forward(self, input_0):
primals_2 = self._se_reduce.weight
primals_3 = self._se_reduce.bias
primals_4 = self._se_expand.weight
primals_5 = self._se_expand.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| IST-DASLab/ACDC | SELayer | false | 17,432 | [
"Apache-2.0"
] | 6 | ac53210b6adc1f2506ff909de08172ed9cad25d5 | https://github.com/IST-DASLab/ACDC/tree/ac53210b6adc1f2506ff909de08172ed9cad25d5 |
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/qr/cqrdftslct6rkuxoqhrjdiqpzcax2cjhkp2wmjmps7s5wyhlgftt.py
# Topologically Sorted Source Nodes: [mean, var, sub, add, sqrt, x, mul, add_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# mul => mul
# sqrt => sqrt
# sub => sub
# var => var
# x => div
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1, 2, 3], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 1, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 4), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_per_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_sqrt_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + (0))
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp31 = tl.load(in_ptr2 + (0))
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 4.0
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp28 = tmp0 - tmp20
tmp29 = tmp28 / tmp25
tmp30 = tmp27 * tmp29
tmp33 = tmp30 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
tl.store(out_ptr0 + (r1 + (64*x0)), tmp33, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf0 # reuse
buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf3 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, var, sub, add, sqrt, x, mul, add_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_0.run(buf1, buf5, primals_1, primals_2, primals_3, buf6, 4, 64, grid=grid(4), stream=stream0)
del primals_2
del primals_3
return (buf6, primals_1, buf1, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from typing import *
class LayerNorm(nn.Module):
"""Normalize by channels, height and width for images."""
__constants__ = ['eps']
def __init__(self, eps):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(1))
self.beta = nn.Parameter(torch.zeros(1))
def forward(self, x):
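        # y = gamma * (x - mean) / sqrt(var + eps) + beta, with mean and (unbiased)
        # variance computed jointly over (C, H, W) for each sample.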
mean = x.mean((1, 2, 3), keepdim=True)
var = x.var((1, 2, 3), keepdim=True)
x = (x - mean) / (var + self.eps).sqrt()
return self.gamma * x + self.beta
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'eps': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
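# Single fused kernel: per-sample mean and unbiased variance (divisor 63) over all
# 64 elements, then gamma * (x - mean) / sqrt(var + eps) + beta with eps = 4.0.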
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + 0)
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp31 = tl.load(in_ptr2 + 0)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 4.0
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp28 = tmp0 - tmp20
tmp29 = tmp28 / tmp25
tmp30 = tmp27 * tmp29
tmp33 = tmp30 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp33, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_0[grid(4)](buf1,
buf5, primals_1, primals_2, primals_3, buf6, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return buf6, primals_1, buf1, buf5
class LayerNormNew(nn.Module):
"""Normalize by channels, height and width for images."""
__constants__ = ['eps']
def __init__(self, eps):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(1))
self.beta = nn.Parameter(torch.zeros(1))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
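# A hedged usage sketch (requires CUDA; arguments mirror get_init_inputs in the
# reference code above):
#   norm = LayerNormNew(eps=4)
#   out = norm(torch.rand(4, 4, 4, 4, device='cuda'))  # same shape as the input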
| ImadDabbura/fastai-courses | LayerNorm | false | 17,433 | [
"Apache-2.0"
] | 3 | 053637a2dd3b4ad6c35f97a13f3fba87af1d3940 | https://github.com/ImadDabbura/fastai-courses/tree/053637a2dd3b4ad6c35f97a13f3fba87af1d3940 |
InstanceNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/nh/cnh7inhq3mfebgqon3lqnxvabb5b7x5dmqrc3gk6dpmfgilgrkjs.py
# Topologically Sorted Source Nodes: [mean, var, sub, add, sqrt, x, mul, add_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# mul => mul
# sqrt => sqrt
# sub => sub
# var => var
# x => div
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2, 3], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [2, 3]), kwargs = {correction: 1, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 4), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_per_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_sqrt_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 4.0
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp31 = tmp29 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_3, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf3 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, var, sub, add, sqrt, x, mul, add_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_0.run(buf1, buf5, primals_1, primals_2, primals_3, buf6, 16, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
return (buf6, primals_1, buf1, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from typing import *
class InstanceNorm(nn.Module):
"""Normalize by height and width for images."""
__constants__ = ['eps']
def __init__(self, nf, mom, eps):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(nf, 1, 1))
self.beta = nn.Parameter(torch.zeros(nf, 1, 1))
def forward(self, x):
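        # Same affine normalization, but statistics are per (sample, channel):
        # mean and (unbiased) variance are taken over (H, W) only.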
mean = x.mean((2, 3), keepdim=True)
var = x.var((2, 3), keepdim=True)
x = (x - mean) / (var + self.eps).sqrt()
return self.gamma * x + self.beta
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nf': 4, 'mom': 4, 'eps': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
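# Single fused kernel: per-(batch, channel) mean and unbiased variance (divisor 15)
# over the 16 spatial elements, then the per-channel affine transform.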
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 4.0
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp31 = tmp29 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_3, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_0[grid(16)](buf1,
buf5, primals_1, primals_2, primals_3, buf6, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return buf6, primals_1, buf1, buf5
class InstanceNormNew(nn.Module):
"""Normalize by height and width for images."""
__constants__ = ['eps']
def __init__(self, nf, mom, eps):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(nf, 1, 1))
self.beta = nn.Parameter(torch.zeros(nf, 1, 1))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
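# A hedged usage sketch (requires CUDA; arguments mirror get_init_inputs above):
#   norm = InstanceNormNew(nf=4, mom=4, eps=4)
#   out = norm(torch.rand(4, 4, 4, 4, device='cuda'))  # same shape as the input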
| ImadDabbura/fastai-courses | InstanceNorm | false | 17,434 | [
"Apache-2.0"
] | 3 | 053637a2dd3b4ad6c35f97a13f3fba87af1d3940 | https://github.com/ImadDabbura/fastai-courses/tree/053637a2dd3b4ad6c35f97a13f3fba87af1d3940 |
layer_basic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/i5/ci5qfysfziwnvpxydmo2qffqusg2zngyg5m6qtcxmfic37xtfnpy.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze, %unsqueeze, %unsqueeze], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.25
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp12
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp19, tmp27, tmp28)
tmp30 = tmp0 >= tmp17
tmp31 = tl.full([1], 3, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tmp30 & tmp32
tmp34 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp40 = tmp38 + tmp39
tmp41 = tmp40 * tmp12
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp33, tmp41, tmp42)
tmp44 = tmp0 >= tmp31
tmp45 = tl.full([1], 4, tl.int64)
tmp46 = tmp0 < tmp45
tmp47 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp53 = tmp51 + tmp52
tmp54 = tmp53 * tmp12
tmp55 = tl.full(tmp54.shape, 0.0, tmp54.dtype)
tmp56 = tl.where(tmp44, tmp54, tmp55)
tmp57 = tl.where(tmp33, tmp43, tmp56)
tmp58 = tl.where(tmp19, tmp29, tmp57)
tmp59 = tl.where(tmp4, tmp15, tmp58)
tl.store(out_ptr0 + (x3 + (64*x2)), tmp59, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/w7/cw7u6to4cz5qbjckbaanejzzdgc54buvy7jurelgqqf2djgxj3ag.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_4, %unsqueeze_4, %unsqueeze_4, %unsqueeze_4], 3), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x2 = (xindex // 16)
x4 = xindex % 16
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + (4*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.25
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (4*x3), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr0 + (1 + (4*x3)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.load(in_ptr0 + (2 + (4*x3)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.load(in_ptr0 + (3 + (4*x3)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp12
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp19, tmp27, tmp28)
tmp30 = tmp0 >= tmp17
tmp31 = tl.full([1], 3, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tmp30 & tmp32
tmp34 = tl.load(in_ptr0 + (4*x3), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr0 + (1 + (4*x3)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr0 + (2 + (4*x3)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr0 + (3 + (4*x3)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp40 = tmp38 + tmp39
tmp41 = tmp40 * tmp12
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp33, tmp41, tmp42)
tmp44 = tmp0 >= tmp31
tmp45 = tl.full([1], 4, tl.int64)
tmp46 = tmp0 < tmp45
tmp47 = tl.load(in_ptr0 + (4*x3), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tl.load(in_ptr0 + (1 + (4*x3)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tl.load(in_ptr0 + (2 + (4*x3)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.load(in_ptr0 + (3 + (4*x3)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp53 = tmp51 + tmp52
tmp54 = tmp53 * tmp12
tmp55 = tl.full(tmp54.shape, 0.0, tmp54.dtype)
tmp56 = tl.where(tmp44, tmp54, tmp55)
tmp57 = tl.where(tmp33, tmp43, tmp56)
tmp58 = tl.where(tmp19, tmp29, tmp57)
tmp59 = tl.where(tmp4, tmp15, tmp58)
tl.store(out_ptr0 + (x4 + (64*x2)), tmp59, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xz/cxz6xar2irzicn2limgguvfae4ytvbsk5vjcoj62lld536ybpk3t.py
# Topologically Sorted Source Nodes: [float_dim, sum_2, sum_of_rows, sum_3], Original ATen: [aten._to_copy, aten.sum, aten.div]
# Source node to ATen node mapping:
# float_dim => full_default
# sum_2 => sum_2
# sum_3 => sum_3
# sum_of_rows => div_1
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 4.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [3]), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %full_default), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div_1, [2]), kwargs = {})
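# Editor's note: one fused pass per (n, d) pair: sum over dim 3 scaled by 1/m = 0.25, then summed over dim 2. The remaining 1/m**2 factor from sum_all is deferred to cat_3 below (applied there as 0.0625).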
triton_poi_fused__to_copy_div_sum_2 = async_compile.triton('triton_poi_fused__to_copy_div_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_div_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 * tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 * tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 * tmp7
tmp35 = tmp26 + tmp34
tl.store(out_ptr0 + (x0), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/an/cano2c63koi4ji3fc2ijsfisk7xfu54xlgsubrg4z4curbph5wgh.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_12, %unsqueeze_12, %unsqueeze_12, %unsqueeze_12], 3), kwargs = {})
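# Editor's note: cat_3 broadcasts the per-(n, d) scalar produced by the previous kernel to a full m x m block, applying the deferred 1/m**2 = 0.0625 scale.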
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex % 16
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp7 & tmp4
tmp9 = tl.load(in_ptr0 + (x2), tmp8 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = 0.0625
tmp11 = tmp9 * tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp8, tmp11, tmp12)
tmp14 = tmp5 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp5 < tmp15
tmp17 = tmp14 & tmp16
tmp18 = tmp17 & tmp4
tmp19 = tl.load(in_ptr0 + (x2), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 * tmp10
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp18, tmp20, tmp21)
tmp23 = tmp5 >= tmp15
tmp24 = tl.full([1], 3, tl.int64)
tmp25 = tmp5 < tmp24
tmp26 = tmp23 & tmp25
tmp27 = tmp26 & tmp4
tmp28 = tl.load(in_ptr0 + (x2), tmp27 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp28 * tmp10
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp27, tmp29, tmp30)
tmp32 = tmp5 >= tmp24
tmp33 = tl.full([1], 4, tl.int64)
tmp34 = tmp5 < tmp33
tmp35 = tmp32 & tmp4
tmp36 = tl.load(in_ptr0 + (x2), tmp35 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp36 * tmp10
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp35, tmp37, tmp38)
tmp40 = tl.where(tmp26, tmp31, tmp39)
tmp41 = tl.where(tmp17, tmp22, tmp40)
tmp42 = tl.where(tmp7, tmp13, tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp4, tmp42, tmp43)
tmp45 = tmp0 >= tmp3
tmp46 = tmp0 < tmp15
tmp47 = tmp45 & tmp46
tmp48 = tmp7 & tmp47
tmp49 = tl.load(in_ptr0 + (x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tmp49 * tmp10
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp48, tmp50, tmp51)
tmp53 = tmp17 & tmp47
tmp54 = tl.load(in_ptr0 + (x2), tmp53 & xmask, eviction_policy='evict_last', other=0.0)
tmp55 = tmp54 * tmp10
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp53, tmp55, tmp56)
tmp58 = tmp26 & tmp47
tmp59 = tl.load(in_ptr0 + (x2), tmp58 & xmask, eviction_policy='evict_last', other=0.0)
tmp60 = tmp59 * tmp10
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp58, tmp60, tmp61)
tmp63 = tmp32 & tmp47
tmp64 = tl.load(in_ptr0 + (x2), tmp63 & xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tmp64 * tmp10
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp63, tmp65, tmp66)
tmp68 = tl.where(tmp26, tmp62, tmp67)
tmp69 = tl.where(tmp17, tmp57, tmp68)
tmp70 = tl.where(tmp7, tmp52, tmp69)
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp47, tmp70, tmp71)
tmp73 = tmp0 >= tmp15
tmp74 = tmp0 < tmp24
tmp75 = tmp73 & tmp74
tmp76 = tmp7 & tmp75
tmp77 = tl.load(in_ptr0 + (x2), tmp76 & xmask, eviction_policy='evict_last', other=0.0)
tmp78 = tmp77 * tmp10
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp76, tmp78, tmp79)
tmp81 = tmp17 & tmp75
tmp82 = tl.load(in_ptr0 + (x2), tmp81 & xmask, eviction_policy='evict_last', other=0.0)
tmp83 = tmp82 * tmp10
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp81, tmp83, tmp84)
tmp86 = tmp26 & tmp75
tmp87 = tl.load(in_ptr0 + (x2), tmp86 & xmask, eviction_policy='evict_last', other=0.0)
tmp88 = tmp87 * tmp10
tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
tmp90 = tl.where(tmp86, tmp88, tmp89)
tmp91 = tmp32 & tmp75
tmp92 = tl.load(in_ptr0 + (x2), tmp91 & xmask, eviction_policy='evict_last', other=0.0)
tmp93 = tmp92 * tmp10
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp91, tmp93, tmp94)
tmp96 = tl.where(tmp26, tmp90, tmp95)
tmp97 = tl.where(tmp17, tmp85, tmp96)
tmp98 = tl.where(tmp7, tmp80, tmp97)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp75, tmp98, tmp99)
tmp101 = tmp0 >= tmp24
tmp102 = tmp0 < tmp33
tmp103 = tmp7 & tmp101
tmp104 = tl.load(in_ptr0 + (x2), tmp103 & xmask, eviction_policy='evict_last', other=0.0)
tmp105 = tmp104 * tmp10
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp103, tmp105, tmp106)
tmp108 = tmp17 & tmp101
tmp109 = tl.load(in_ptr0 + (x2), tmp108 & xmask, eviction_policy='evict_last', other=0.0)
tmp110 = tmp109 * tmp10
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp108, tmp110, tmp111)
tmp113 = tmp26 & tmp101
tmp114 = tl.load(in_ptr0 + (x2), tmp113 & xmask, eviction_policy='evict_last', other=0.0)
tmp115 = tmp114 * tmp10
tmp116 = tl.full(tmp115.shape, 0.0, tmp115.dtype)
tmp117 = tl.where(tmp113, tmp115, tmp116)
tmp118 = tmp32 & tmp101
tmp119 = tl.load(in_ptr0 + (x2), tmp118 & xmask, eviction_policy='evict_last', other=0.0)
tmp120 = tmp119 * tmp10
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp118, tmp120, tmp121)
tmp123 = tl.where(tmp26, tmp117, tmp122)
tmp124 = tl.where(tmp17, tmp112, tmp123)
tmp125 = tl.where(tmp7, tmp107, tmp124)
tmp126 = tl.full(tmp125.shape, 0.0, tmp125.dtype)
tmp127 = tl.where(tmp101, tmp125, tmp126)
tmp128 = tl.where(tmp75, tmp100, tmp127)
tmp129 = tl.where(tmp47, tmp72, tmp128)
tmp130 = tl.where(tmp4, tmp44, tmp129)
tl.store(out_ptr0 + (x3 + (64*x2)), tmp130, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3h/c3h3pqeh6czn4kaurjzb63fokvwqfw2vchzytl7opq7jhry7ogdf.py
# Topologically Sorted Source Nodes: [ops_out], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# ops_out => cat_4
# Graph fragment:
# %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %cat, %cat_1, %cat_3], 2), kwargs = {})
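# Editor's note: despite the "stack" name, this kernel only copies the identity op (primals_1) into basis slot 0 of the stacked buffer; slots 1-3 are aliased views already filled by the cat kernels above.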
triton_poi_fused_stack_4 = async_compile.triton('triton_poi_fused_stack_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (64*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/oc/cochykb72igchjmi4n57racfpq7bcoz6eofzwvf3gtskdnyesyrg.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# output => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_1, [5], True), kwargs = {})
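# Editor's note: reduces the stacked ops over the depth dimension (the four stride-64 loads below) and writes the basis dimension innermost, so the bmm that follows can contract over it.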
triton_poi_fused_sum_5 = async_compile.triton('triton_poi_fused_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y0) + (256*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (64 + x2 + (16*y0) + (256*y1)), xmask & ymask)
tmp3 = tl.load(in_ptr0 + (128 + x2 + (16*y0) + (256*y1)), xmask & ymask)
tmp5 = tl.load(in_ptr0 + (192 + x2 + (16*y0) + (256*y1)), xmask & ymask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp6, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/y4/cy4fcwo3mwdm7p7utv4cld6xr6mkzmitwz4kqy7cvamvrtnsi6q4.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# output_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_3), kwargs = {})
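# Editor's note: adds the single scalar bias (output_depth = 1 in the traced config) to all 64 outputs in place.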
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (1, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((4, 4, 16, 4), (256, 64, 4, 1), torch.float32)
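        # Editor's note: buf5 stacks the four basis ops along dim 2 ((N, D, basis*m, m)); buf0, buf1, buf3 and buf4 below are aliased (4, 4, 4, 4) views of its basis slots 1, 2, 3 and 0 respectively.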
buf0 = reinterpret_tensor(buf5, (4, 4, 4, 4), (256, 64, 4, 1), 16) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = reinterpret_tensor(buf5, (4, 4, 4, 4), (256, 64, 4, 1), 32) # alias
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_1, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [float_dim, sum_2, sum_of_rows, sum_3], Original ATen: [aten._to_copy, aten.sum, aten.div]
triton_poi_fused__to_copy_div_sum_2.run(primals_1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = reinterpret_tensor(buf5, (4, 4, 4, 4), (256, 64, 4, 1), 48) # alias
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf2
buf4 = reinterpret_tensor(buf5, (4, 4, 4, 4), (256, 64, 4, 1), 0) # alias
# Topologically Sorted Source Nodes: [ops_out], Original ATen: [aten.stack]
triton_poi_fused_stack_4.run(primals_1, buf4, 256, grid=grid(256), stream=stream0)
del primals_1
buf6 = empty_strided_cuda((4, 1, 4, 4, 4, 1), (64, 64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sum]
triton_poi_fused_sum_5.run(buf5, buf6, 16, 16, grid=grid(16, 16), stream=stream0)
del buf0
del buf1
del buf3
del buf4
del buf5
buf7 = empty_strided_cuda((1, 1, 64), (64, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, reinterpret_tensor(buf6, (1, 4, 64), (0, 1, 4), 0), out=buf7)
del primals_2
buf8 = reinterpret_tensor(buf7, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf8, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf8, reinterpret_tensor(buf6, (1, 64, 4), (4, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class layer_basic(nn.Module):
"""
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m x m tensor
:return: output: N x S x m x m tensor
"""
def __init__(self, input_depth, output_depth, normalization='inf',
normalization_val=1.0, device='cpu'):
super().__init__()
self.input_depth = input_depth
self.output_depth = output_depth
self.normalization = normalization
self.normalization_val = normalization_val
self.device = device
self.basis_dimension = 4
self.coeffs = torch.nn.Parameter(torch.randn(self.input_depth, self
.output_depth, self.basis_dimension) * np.sqrt(2.0) / (self.
input_depth + self.output_depth), requires_grad=True)
self.bias = torch.nn.Parameter(torch.zeros(1, self.output_depth, 1, 1))
def forward(self, inputs):
m = inputs.size(3)
float_dim = np.float32(m)
ops_out = []
ops_out.append(inputs)
sum_of_cols = torch.sum(inputs, dim=2) / float_dim
ops_out.append(torch.cat([sum_of_cols.unsqueeze(dim=2) for i in
range(m)], dim=2))
sum_of_rows = torch.sum(inputs, dim=3) / float_dim
ops_out.append(torch.cat([sum_of_rows.unsqueeze(dim=3) for i in
range(m)], dim=3))
sum_all = torch.sum(sum_of_rows, dim=2) / float_dim ** 2
out = torch.cat([sum_all.unsqueeze(dim=2) for i in range(m)], dim=2)
ops_out.append(torch.cat([out.unsqueeze(dim=3) for i in range(m)],
dim=3))
ops_out = torch.stack(ops_out, dim=2)
output = torch.einsum('dsb,ndbij->nsij', self.coeffs, ops_out)
output = output + self.bias
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_depth': 1, 'output_depth': 1}]
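# Editor's sketch (illustration only, not from the original repo): minimal
# eager usage of layer_basic. The four stacked basis ops are the identity,
# the column means replicated along dim 2, the row means replicated along
# dim 3, and the rescaled global sum broadcast to every position; coeffs
# then mixes the basis per input/output channel via the einsum.
_layer = layer_basic(input_depth=4, output_depth=4)
_x = torch.rand(2, 4, 5, 5)              # N x D x m x m
assert _layer(_x).shape == (2, 4, 5, 5)  # N x S x m x m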
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.25
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp12
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp19, tmp27, tmp28)
tmp30 = tmp0 >= tmp17
tmp31 = tl.full([1], 3, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tmp30 & tmp32
tmp34 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp40 = tmp38 + tmp39
tmp41 = tmp40 * tmp12
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp33, tmp41, tmp42)
tmp44 = tmp0 >= tmp31
tl.full([1], 4, tl.int64)
tmp47 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp44 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), tmp44 & xmask,
eviction_policy='evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), tmp44 & xmask,
eviction_policy='evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), tmp44 & xmask,
eviction_policy='evict_last', other=0.0)
tmp53 = tmp51 + tmp52
tmp54 = tmp53 * tmp12
tmp55 = tl.full(tmp54.shape, 0.0, tmp54.dtype)
tmp56 = tl.where(tmp44, tmp54, tmp55)
tmp57 = tl.where(tmp33, tmp43, tmp56)
tmp58 = tl.where(tmp19, tmp29, tmp57)
tmp59 = tl.where(tmp4, tmp15, tmp58)
tl.store(out_ptr0 + (x3 + 64 * x2), tmp59, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x2 = xindex // 16
x4 = xindex % 16
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.25
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + 4 * x3, tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tl.load(in_ptr0 + (1 + 4 * x3), tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.load(in_ptr0 + (2 + 4 * x3), tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.load(in_ptr0 + (3 + 4 * x3), tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp12
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp19, tmp27, tmp28)
tmp30 = tmp0 >= tmp17
tmp31 = tl.full([1], 3, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tmp30 & tmp32
tmp34 = tl.load(in_ptr0 + 4 * x3, tmp33 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp35 = tl.load(in_ptr0 + (1 + 4 * x3), tmp33 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr0 + (2 + 4 * x3), tmp33 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr0 + (3 + 4 * x3), tmp33 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp40 = tmp38 + tmp39
tmp41 = tmp40 * tmp12
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp33, tmp41, tmp42)
tmp44 = tmp0 >= tmp31
tl.full([1], 4, tl.int64)
tmp47 = tl.load(in_ptr0 + 4 * x3, tmp44 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp48 = tl.load(in_ptr0 + (1 + 4 * x3), tmp44 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tl.load(in_ptr0 + (2 + 4 * x3), tmp44 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.load(in_ptr0 + (3 + 4 * x3), tmp44 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp53 = tmp51 + tmp52
tmp54 = tmp53 * tmp12
tmp55 = tl.full(tmp54.shape, 0.0, tmp54.dtype)
tmp56 = tl.where(tmp44, tmp54, tmp55)
tmp57 = tl.where(tmp33, tmp43, tmp56)
tmp58 = tl.where(tmp19, tmp29, tmp57)
tmp59 = tl.where(tmp4, tmp15, tmp58)
tl.store(out_ptr0 + (x4 + 64 * x2), tmp59, xmask)
@triton.jit
def triton_poi_fused__to_copy_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 * tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 * tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 * tmp7
tmp35 = tmp26 + tmp34
tl.store(out_ptr0 + x0, tmp35, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex % 16
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp7 = tmp5 < tmp3
tmp8 = tmp7 & tmp4
tmp9 = tl.load(in_ptr0 + x2, tmp8 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = 0.0625
tmp11 = tmp9 * tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp8, tmp11, tmp12)
tmp14 = tmp5 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp5 < tmp15
tmp17 = tmp14 & tmp16
tmp18 = tmp17 & tmp4
tmp19 = tl.load(in_ptr0 + x2, tmp18 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tmp19 * tmp10
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp18, tmp20, tmp21)
tmp23 = tmp5 >= tmp15
tmp24 = tl.full([1], 3, tl.int64)
tmp25 = tmp5 < tmp24
tmp26 = tmp23 & tmp25
tmp27 = tmp26 & tmp4
tmp28 = tl.load(in_ptr0 + x2, tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tmp28 * tmp10
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp27, tmp29, tmp30)
tmp32 = tmp5 >= tmp24
tl.full([1], 4, tl.int64)
tmp35 = tmp32 & tmp4
tmp36 = tl.load(in_ptr0 + x2, tmp35 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp36 * tmp10
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp35, tmp37, tmp38)
tmp40 = tl.where(tmp26, tmp31, tmp39)
tmp41 = tl.where(tmp17, tmp22, tmp40)
tmp42 = tl.where(tmp7, tmp13, tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp4, tmp42, tmp43)
tmp45 = tmp0 >= tmp3
tmp46 = tmp0 < tmp15
tmp47 = tmp45 & tmp46
tmp48 = tmp7 & tmp47
tmp49 = tl.load(in_ptr0 + x2, tmp48 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp50 = tmp49 * tmp10
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp48, tmp50, tmp51)
tmp53 = tmp17 & tmp47
tmp54 = tl.load(in_ptr0 + x2, tmp53 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp55 = tmp54 * tmp10
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp53, tmp55, tmp56)
tmp58 = tmp26 & tmp47
tmp59 = tl.load(in_ptr0 + x2, tmp58 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp60 = tmp59 * tmp10
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp58, tmp60, tmp61)
tmp63 = tmp32 & tmp47
tmp64 = tl.load(in_ptr0 + x2, tmp63 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp65 = tmp64 * tmp10
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp63, tmp65, tmp66)
tmp68 = tl.where(tmp26, tmp62, tmp67)
tmp69 = tl.where(tmp17, tmp57, tmp68)
tmp70 = tl.where(tmp7, tmp52, tmp69)
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp47, tmp70, tmp71)
tmp73 = tmp0 >= tmp15
tmp74 = tmp0 < tmp24
tmp75 = tmp73 & tmp74
tmp76 = tmp7 & tmp75
tmp77 = tl.load(in_ptr0 + x2, tmp76 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp78 = tmp77 * tmp10
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp76, tmp78, tmp79)
tmp81 = tmp17 & tmp75
tmp82 = tl.load(in_ptr0 + x2, tmp81 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp83 = tmp82 * tmp10
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp81, tmp83, tmp84)
tmp86 = tmp26 & tmp75
tmp87 = tl.load(in_ptr0 + x2, tmp86 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp88 = tmp87 * tmp10
tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
tmp90 = tl.where(tmp86, tmp88, tmp89)
tmp91 = tmp32 & tmp75
tmp92 = tl.load(in_ptr0 + x2, tmp91 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp93 = tmp92 * tmp10
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp91, tmp93, tmp94)
tmp96 = tl.where(tmp26, tmp90, tmp95)
tmp97 = tl.where(tmp17, tmp85, tmp96)
tmp98 = tl.where(tmp7, tmp80, tmp97)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp75, tmp98, tmp99)
tmp101 = tmp0 >= tmp24
tmp103 = tmp7 & tmp101
tmp104 = tl.load(in_ptr0 + x2, tmp103 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp105 = tmp104 * tmp10
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp103, tmp105, tmp106)
tmp108 = tmp17 & tmp101
tmp109 = tl.load(in_ptr0 + x2, tmp108 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp110 = tmp109 * tmp10
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp108, tmp110, tmp111)
tmp113 = tmp26 & tmp101
tmp114 = tl.load(in_ptr0 + x2, tmp113 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp115 = tmp114 * tmp10
tmp116 = tl.full(tmp115.shape, 0.0, tmp115.dtype)
tmp117 = tl.where(tmp113, tmp115, tmp116)
tmp118 = tmp32 & tmp101
tmp119 = tl.load(in_ptr0 + x2, tmp118 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp120 = tmp119 * tmp10
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp118, tmp120, tmp121)
tmp123 = tl.where(tmp26, tmp117, tmp122)
tmp124 = tl.where(tmp17, tmp112, tmp123)
tmp125 = tl.where(tmp7, tmp107, tmp124)
tmp126 = tl.full(tmp125.shape, 0.0, tmp125.dtype)
tmp127 = tl.where(tmp101, tmp125, tmp126)
tmp128 = tl.where(tmp75, tmp100, tmp127)
tmp129 = tl.where(tmp47, tmp72, tmp128)
tmp130 = tl.where(tmp4, tmp44, tmp129)
tl.store(out_ptr0 + (x3 + 64 * x2), tmp130, xmask)
@triton.jit
def triton_poi_fused_stack_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 64 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_sum_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y0 + 256 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (64 + x2 + 16 * y0 + 256 * y1), xmask & ymask)
tmp3 = tl.load(in_ptr0 + (128 + x2 + 16 * y0 + 256 * y1), xmask & ymask)
tmp5 = tl.load(in_ptr0 + (192 + x2 + 16 * y0 + 256 * y1), xmask & ymask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (1, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((4, 4, 16, 4), (256, 64, 4, 1), torch.float32
)
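        # buf5 is the same stacked basis-ops buffer as in the annotated call() above; the reinterpret_tensor views below alias its four basis slots (editor's note).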
buf0 = reinterpret_tensor(buf5, (4, 4, 4, 4), (256, 64, 4, 1), 16)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = reinterpret_tensor(buf5, (4, 4, 4, 4), (256, 64, 4, 1), 32)
triton_poi_fused_cat_1[grid(256)](primals_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__to_copy_div_sum_2[grid(16)](primals_1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf5, (4, 4, 4, 4), (256, 64, 4, 1), 48)
triton_poi_fused_cat_3[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
buf4 = reinterpret_tensor(buf5, (4, 4, 4, 4), (256, 64, 4, 1), 0)
triton_poi_fused_stack_4[grid(256)](primals_1, buf4, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_1
buf6 = empty_strided_cuda((4, 1, 4, 4, 4, 1), (64, 64, 16, 4, 1, 1),
torch.float32)
triton_poi_fused_sum_5[grid(16, 16)](buf5, buf6, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del buf0
del buf1
del buf3
del buf4
del buf5
buf7 = empty_strided_cuda((1, 1, 64), (64, 64, 1), torch.float32)
extern_kernels.bmm(primals_2, reinterpret_tensor(buf6, (1, 4, 64),
(0, 1, 4), 0), out=buf7)
del primals_2
buf8 = reinterpret_tensor(buf7, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf7
triton_poi_fused_add_6[grid(64)](buf8, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf8, reinterpret_tensor(buf6, (1, 64, 4), (4, 4, 1), 0)
class layer_basicNew(nn.Module):
"""
:param name: name of layer
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m x m tensor
:return: output: N x S x m x m tensor
"""
def __init__(self, input_depth, output_depth, normalization='inf',
normalization_val=1.0, device='cpu'):
super().__init__()
self.input_depth = input_depth
self.output_depth = output_depth
self.normalization = normalization
self.normalization_val = normalization_val
self.device = device
self.basis_dimension = 4
self.coeffs = torch.nn.Parameter(torch.randn(self.input_depth, self
.output_depth, self.basis_dimension) * np.sqrt(2.0) / (self.
input_depth + self.output_depth), requires_grad=True)
self.bias = torch.nn.Parameter(torch.zeros(1, self.output_depth, 1, 1))
def forward(self, input_0):
primals_2 = self.coeffs
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
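# Editor's sketch (illustration only): the compiled wrapper is shape-
# specialized by the assert_size_stride guards in call(), so it accepts
# exactly the traced configuration below.
if torch.cuda.is_available():
    _m = layer_basicNew(input_depth=1, output_depth=1).cuda()
    _y = _m(torch.rand(4, 4, 4, 4, device='cuda'))
    assert _y.shape == (4, 1, 4, 4)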
| HyTruongSon/InvariantGraphNetworks-PyTorch | layer_basic | false | 17,435 | ["Apache-2.0"] | 7 | da9fdaa4f858d6fcae14b08a59d4b172a2aabaf8 | https://github.com/HyTruongSon/InvariantGraphNetworks-PyTorch/tree/da9fdaa4f858d6fcae14b08a59d4b172a2aabaf8 |
SamePad2dStrong | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/aa/caalpmdmlvzd2ux6wytsi3erceohydfj334h4hrd4jkdqlfwi6ki.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# pad => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 2, 1, 2], 0.0), kwargs = {})
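# Editor's note: the hardcoded [1, 2, 1, 2] padding is TF "SAME" padding for kernel_size=4, stride=1 on a 4x4 input: pad_along = 3 per axis, split as floor(3/2) = 1 before and 2 after.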
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 7) % 7
x0 = xindex % 7
x2 = (xindex // 49)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 784, grid=grid(784), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class SamePad2dStrong(nn.Module):
"""Mimics tensorflow's 'SAME' padding.
"""
def __init__(self, kernel_size, stride):
super(SamePad2dStrong, self).__init__()
self.kernel_size = torch.nn.modules.utils._pair(kernel_size)
self.stride = torch.nn.modules.utils._pair(stride)
def forward(self, input):
in_width = input.size()[2]
in_height = input.size()[3]
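        # Editor's note: dims 2 and 3 of an NCHW tensor are (H, W), so these two names are swapped; for the square inputs used here the padding math is symmetric and the result is unaffected.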
out_width = math.ceil(float(in_width) / float(self.stride[0]))
out_height = math.ceil(float(in_height) / float(self.stride[1]))
pad_along_width = (out_width - 1) * self.stride[0] + self.kernel_size[0
] - in_width
pad_along_height = (out_height - 1) * self.stride[1
] + self.kernel_size[1] - in_height
pad_left = math.floor(pad_along_width / 2)
pad_top = math.floor(pad_along_height / 2)
pad_right = pad_along_width - pad_left
pad_bottom = pad_along_height - pad_top
return F.pad(input, (pad_left, pad_right, pad_top, pad_bottom),
'constant', 0)
def __repr__(self):
return self.__class__.__name__
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4, 'stride': 1}]
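# Editor's sketch (assumes the eager class above): with kernel_size=4, stride=1
# and a 4x4 input, out = ceil(4 / 1) = 4 and pad_along = (4 - 1) * 1 + 4 - 4 = 3
# per axis, split as pad_left = floor(3 / 2) = 1 and pad_right = 2, i.e. the
# (1, 2, 1, 2) padding hardcoded in the compiled kernel, giving a 7x7 output.
_pad = SamePad2dStrong(kernel_size=4, stride=1)
assert _pad(torch.rand(4, 4, 4, 4)).shape == (4, 4, 7, 7)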
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](arg0_1, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SamePad2dStrongNew(nn.Module):
"""Mimics tensorflow's 'SAME' padding.
"""
def __init__(self, kernel_size, stride):
super(SamePad2dStrongNew, self).__init__()
self.kernel_size = torch.nn.modules.utils._pair(kernel_size)
self.stride = torch.nn.modules.utils._pair(stride)
def __repr__(self):
return self.__class__.__name__
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| IssamLaradji/wisenet | SamePad2dStrong | false | 17,436 | ["Apache-2.0"] | 7 | 881457f5168815f5e9d03f110244842d539747a0 | https://github.com/IssamLaradji/wisenet/tree/881457f5168815f5e9d03f110244842d539747a0 |
InstrShifting | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/33/c33mxoi7mcgjxewycg6brb6odrmvpt4xeo3nrym45e3fb5wk4cef.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
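# Editor's note: a horizontally fused "foreach" kernel; each branch copies one 4x4 input into a 4-column slice of a 12-column staging buffer (stores at x0 + 12*x1 and x3 + 12*x4).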
triton_for_fused_0 = async_compile.triton('triton_for_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.foreach(
num_warps=8,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'kernel_name': 'triton_for_fused_0', 'mutated_arg_names': [], 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
)
@triton.jit
def triton_for_fused_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1):
pid = tl.program_id(0)
XBLOCK: tl.constexpr = 1024
num_xblocks_0 = tl.cdiv(16, XBLOCK)
num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK)
if pid < num_xblocks_0:
pid_offset = pid
xnumel = 16
rnumel = 1
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (12*x1)), tmp0, xmask)
elif pid < num_xblocks_1:
pid_offset = pid - num_xblocks_0
xnumel = 16
rnumel = 1
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x3 = xindex % 4
x4 = (xindex // 4)
tmp1 = tl.load(in_ptr1 + (x5), xmask)
tl.store(out_ptr1 + (x3 + (12*x4)), tmp1, xmask)
else:
pass
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/e3/ce3jmt47de34hvsc6lu3bhaeehrvb3w4w2fo3agwspxv3pzwkjek.py
# Topologically Sorted Source Nodes: [sigmoid, tanh, h_t_c], Original ATen: [aten.sigmoid, aten.tanh, aten.mul]
# Source node to ATen node mapping:
# h_t_c => mul
# sigmoid => sigmoid
# tanh => tanh
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mm_1,), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_6,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
triton_poi_fused_mul_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + (x0 + (8*x1)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6p/c6padib4hy3udk624vldvmvblo7r6nhqljbkf3o5rum2fenyr4gv.py
# Topologically Sorted Source Nodes: [p_t_s], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
# Source node to ATen node mapping:
# p_t_s => sigmoid_1
# Graph fragment:
# %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mm_3,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub), kwargs = {})
triton_poi_fused_sigmoid_sigmoid_backward_2 = async_compile.triton('triton_poi_fused_sigmoid_sigmoid_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_sigmoid_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_2(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp1 * tmp3
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 12), (12, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (1, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf0 = reinterpret_tensor(buf3, (4, 4), (12, 1), 0) # alias
# Topologically Sorted Source Nodes: [proj_h], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf3, (4, 4), (12, 1), 4) # alias
buf2 = reinterpret_tensor(buf3, (4, 4), (12, 1), 8) # alias
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_for_fused_0.run(primals_3, primals_4, buf1, buf2, grid=(2, 1, 1), stream=stream0)
del primals_3
del primals_4
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (12, 4), (1, 12), 0), out=buf4)
buf7 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf5 = reinterpret_tensor(buf7, (4, 4), (8, 1), 0) # alias
# Topologically Sorted Source Nodes: [proj_e], Original ATen: [aten.mm]
extern_kernels.mm(primals_8, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf5)
del primals_7
buf6 = reinterpret_tensor(buf7, (4, 4), (8, 1), 4) # alias
# Topologically Sorted Source Nodes: [sigmoid, tanh, h_t_c], Original ATen: [aten.sigmoid, aten.tanh, aten.mul]
triton_poi_fused_mul_sigmoid_tanh_1.run(buf4, primals_6, buf6, 16, grid=grid(16), stream=stream0)
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(buf7, reinterpret_tensor(primals_9, (8, 1), (1, 8), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_t_s], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
triton_poi_fused_sigmoid_sigmoid_backward_2.run(buf9, buf10, 4, grid=grid(4), stream=stream0)
return (reinterpret_tensor(buf9, (4, ), (1, ), 0), primals_1, primals_6, primals_8, buf3, buf4, buf7, buf10, primals_9, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class InstrShifting(nn.Module):
""" Sub-Instruction Shifting Module.
Decide whether the current subinstruction will
be completed by the next action or not. """
def __init__(self, rnn_hidden_size, shift_hidden_size, action_emb_size,
max_subinstr_size, drop_ratio):
super(InstrShifting, self).__init__()
self.drop = nn.Dropout(p=drop_ratio)
self.linear0 = nn.Linear(rnn_hidden_size, shift_hidden_size, bias=False
)
self.linear1 = nn.Linear(rnn_hidden_size + shift_hidden_size +
action_emb_size, shift_hidden_size, bias=False)
self.linear2 = nn.Linear(max_subinstr_size, shift_hidden_size, bias
=False)
self.linear3 = nn.Linear(2 * shift_hidden_size, 1, bias=False)
def forward(self, h_t, m_t, a_t_cur, weighted_ctx, e_t):
""" Propogate through the network.
:param h_t: torch.Tensor, batch x rnn_hidden_size
:param m_t: torch.Tensor, batch x rnn_hidden_size
:param a_t_cur: torch.Tensor, batch x action_emb_size
:param weighted_ctx: torch.Tensor, batch x rnn_hidden_size
:param e_t: torch.Tensor, batch x max_subinstr_size
"""
proj_h = self.linear0(self.drop(h_t))
concat_input = torch.cat((proj_h, a_t_cur, weighted_ctx), 1)
h_t_c = torch.sigmoid(self.linear1(concat_input)) * torch.tanh(m_t)
proj_e = self.linear2(e_t)
concat_input = torch.cat((proj_e, self.drop(h_t_c)), 1)
p_t_s = torch.sigmoid(self.linear3(concat_input))
return p_t_s.squeeze()
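def _demo_instr_shifting():
    # Hedged usage sketch, not part of the original source: sizes mirror
    # get_init_inputs() below. The module maps five (batch x 4) inputs to a
    # per-item shift probability in (0, 1).
    model = InstrShifting(rnn_hidden_size=4, shift_hidden_size=4,
        action_emb_size=4, max_subinstr_size=4, drop_ratio=0.5)
    h_t, m_t, a_t_cur, ctx, e_t = (torch.rand(4, 4) for _ in range(5))
    return model(h_t, m_t, a_t_cur, ctx, e_t)  # shape: (4,)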
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'rnn_hidden_size': 4, 'shift_hidden_size': 4,
'action_emb_size': 4, 'max_subinstr_size': 4, 'drop_ratio': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
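# Scatter kernel: copies the two 4x4 inputs into columns 4:8 and 8:12 of the
# 4x12 concat buffer (columns 0:4 are filled by the proj_h matmul in call()).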
@triton.jit
def triton_for_fused_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1):
pid = tl.program_id(0)
XBLOCK: tl.constexpr = 1024
num_xblocks_0 = tl.cdiv(16, XBLOCK)
num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK)
if pid < num_xblocks_0:
pid_offset = pid
xnumel = 16
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)
elif pid < num_xblocks_1:
pid_offset = pid - num_xblocks_0
xnumel = 16
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x3 = xindex % 4
x4 = xindex // 4
tmp1 = tl.load(in_ptr1 + x5, xmask)
tl.store(out_ptr1 + (x3 + 12 * x4), tmp1, xmask)
else:
pass
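# Elementwise h_t_c = sigmoid(linear1(concat)) * tanh(m_t), written into the
# second half (columns 4:8) of the 4x8 concat buffer consumed by linear3.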
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + (x0 + 8 * x1), tmp4, xmask)
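# Computes sigmoid(x) in place and also stores sigmoid(x) * (1 - sigmoid(x)),
# the factor reused by autograd for the sigmoid backward pass.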
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_2(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp1 * tmp3
tl.store(in_out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 12), (12, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (1, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf0 = reinterpret_tensor(buf3, (4, 4), (12, 1), 0)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf3, (4, 4), (12, 1), 4)
buf2 = reinterpret_tensor(buf3, (4, 4), (12, 1), 8)
get_raw_stream(0)
triton_for_fused_0[2, 1, 1](primals_3, primals_4, buf1, buf2,
num_warps=8, num_stages=1)
del primals_3
del primals_4
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (12, 4), (1,
12), 0), out=buf4)
buf7 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf5 = reinterpret_tensor(buf7, (4, 4), (8, 1), 0)
extern_kernels.mm(primals_8, reinterpret_tensor(primals_7, (4, 4),
(1, 4), 0), out=buf5)
del primals_7
buf6 = reinterpret_tensor(buf7, (4, 4), (8, 1), 4)
triton_poi_fused_mul_sigmoid_tanh_1[grid(16)](buf4, primals_6, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf7, reinterpret_tensor(primals_9, (8, 1), (1, 8
), 0), out=buf8)
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused_sigmoid_sigmoid_backward_2[grid(4)](buf9, buf10, 4,
XBLOCK=4, num_warps=1, num_stages=1)
return (reinterpret_tensor(buf9, (4,), (1,), 0), primals_1, primals_6,
primals_8, buf3, buf4, buf7, buf10, primals_9, primals_5)
class InstrShiftingNew(nn.Module):
""" Sub-Instruction Shifting Module.
Decide whether the current subinstruction will
be completed by the next action or not. """
def __init__(self, rnn_hidden_size, shift_hidden_size, action_emb_size,
max_subinstr_size, drop_ratio):
super(InstrShiftingNew, self).__init__()
self.drop = nn.Dropout(p=drop_ratio)
self.linear0 = nn.Linear(rnn_hidden_size, shift_hidden_size, bias=False
)
self.linear1 = nn.Linear(rnn_hidden_size + shift_hidden_size +
action_emb_size, shift_hidden_size, bias=False)
self.linear2 = nn.Linear(max_subinstr_size, shift_hidden_size, bias
=False)
self.linear3 = nn.Linear(2 * shift_hidden_size, 1, bias=False)
def forward(self, input_0, input_1, input_2, input_3, input_4):
primals_1 = self.linear0.weight
primals_5 = self.linear1.weight
primals_2 = self.linear2.weight
primals_9 = self.linear3.weight
primals_3 = input_0
primals_4 = input_1
primals_6 = input_2
primals_7 = input_3
primals_8 = input_4
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| IMNearth/Curriculum-Learning-For-VLN | InstrShifting | false | 17,437 | [
"MIT"
] | 8 | d2fe1286eb295dc8c63a0c886b35883f32481d85 | https://github.com/IMNearth/Curriculum-Learning-For-VLN/tree/d2fe1286eb295dc8c63a0c886b35883f32481d85 |
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/7m/c7m25mks7iqhmv2aiadogxxp6cpxkpgxkxelv4rozuqdg44rckt6.py
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# ground_o => sum_2
# intersection => sum_1
# mul => mul
# pred_o => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [2, 3]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [2, 3]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/dw/cdwuuem6tv33mv7a7hacein5rzti6ogglqzwathpuld46bmbdvx5.py
# Topologically Sorted Source Nodes: [mul_1, add_1, denominator, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# denominator => add
# f => sub
# f_1 => mean
# mul_1 => mul_1
# truediv => div
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-05), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-05
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp9
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = 16.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [mul_1, add_1, denominator, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean]
triton_per_fused_add_div_mean_mul_rsub_1.run(buf4, buf0, buf1, buf2, 1, 16, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import collections
import torch
import warnings
from typing import Optional
from typing import Union
from typing import Callable
from typing import Any
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj,
str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any,
...]:
"""
    Returns a copy of `tup` with `dim` values, either shortened or padded with `pad_val` as necessary.
"""
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
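# For example (illustrative, not part of the original source):
#   ensure_tuple_size((2, 3), 4)       -> (2, 3, 0, 0)
#   ensure_tuple_size((2, 3, 4, 5), 2) -> (2, 3)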
def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype:
'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
    for N classes, where N = `num_classes`.
Example:
    For every value v = labels[b,0,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndimension() < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
    assert sh[dim] == 1, 'labels should have a channel with length equal to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
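def _demo_one_hot():
    # Minimal sketch, not part of the original source, of the behaviour the
    # docstring describes: two labels one-hot encoded over three classes.
    labels = torch.tensor([[1], [2]])      # shape B x 1
    return one_hot(labels, num_classes=3)  # tensor([[0., 1., 0.], [0., 0., 1.]])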
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class DiceLoss(_Loss):
"""
    Compute average Dice loss between two tensors. It supports both multi-class and multi-label tasks.
    Input logits `input` (BNHW[D] where N is the number of classes) are compared with ground truth `target` (BNHW[D]).
    Axis N of `input` is expected to hold logit predictions for each class rather than image channels,
    while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
    values added to the intersection and union components of the intersection-over-union calculation to smooth
    results respectively; these values should be small. The `include_background` class attribute can be set to
    False for an instance of DiceLoss to exclude the first category (channel index 0), which is by convention
    assumed to be background. If the non-background segmentations are small compared to the total image size,
    they can get overwhelmed by the signal from the background, so excluding it in such cases helps convergence.
    Milletari, F. et al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction.
MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch:
'bool'=False) ->None:
"""
Args:
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, pass another callable to apply
                a different activation layer. Defaults to ``None``. For example:
                `other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
            batch: whether to sum the intersection and union areas over the batch dimension before dividing.
                Defaults to False; a Dice loss value is computed independently for each item in the batch
                before any `reduction`.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
self.squared_pred = squared_pred
self.jaccard = jaccard
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD].
Raises:
ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
"""
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if self.softmax:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `softmax=True` ignored.')
else:
input = torch.softmax(input, 1)
if self.other_act is not None:
input = self.other_act(input)
if self.to_onehot_y:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `to_onehot_y=True` ignored.')
else:
target = one_hot(target, num_classes=n_pred_ch)
if not self.include_background:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `include_background=False` ignored.'
)
else:
target = target[:, 1:]
input = input[:, 1:]
assert target.shape == input.shape, f'ground truth has differing shape ({target.shape}) from input ({input.shape})'
reduce_axis = list(range(2, len(input.shape)))
if self.batch:
reduce_axis = [0] + reduce_axis
intersection = torch.sum(target * input, dim=reduce_axis)
if self.squared_pred:
target = torch.pow(target, 2)
input = torch.pow(input, 2)
ground_o = torch.sum(target, dim=reduce_axis)
pred_o = torch.sum(input, dim=reduce_axis)
denominator = ground_o + pred_o
if self.jaccard:
denominator = 2.0 * (denominator - intersection)
f: 'torch.Tensor' = 1.0 - (2.0 * intersection + self.smooth_nr) / (
denominator + self.smooth_dr)
if self.reduction == LossReduction.MEAN.value:
f = torch.mean(f)
elif self.reduction == LossReduction.SUM.value:
f = torch.sum(f)
elif self.reduction == LossReduction.NONE.value:
pass
else:
raise ValueError(
f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].'
)
return f
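def _demo_dice_loss():
    # Hedged usage sketch, not part of the original source: `sigmoid=True`
    # squashes raw logits into probabilities, and the binary target already
    # matches the BNHW prediction shape, so no one-hot conversion is needed.
    loss_fn = DiceLoss(sigmoid=True)
    logits = torch.randn(2, 3, 8, 8)
    target = torch.randint(0, 2, (2, 3, 8, 8)).float()
    return loss_fn(logits, target)  # scalar mean Dice loss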
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import collections
from typing import Optional
from typing import Union
from typing import Callable
from typing import Any
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
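# Fused reduction: for each of the 16 (batch, channel) rows, sums the 16
# spatial elements of target * input, target, and input in a single pass.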
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
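# Combines the per-row sums into the final scalar loss:
# mean(1 - (2 * intersection + smooth_nr) / (ground_o + pred_o + smooth_dr)).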
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-05
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp9
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = 16.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(16)](arg1_1, arg0_1, buf0, buf1,
buf2, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mean_mul_rsub_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj,
str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any,
...]:
"""
    Returns a copy of `tup` with `dim` values, either shortened or padded with `pad_val` as necessary.
"""
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype:
'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
    for N classes, where N = `num_classes`.
Example:
    For every value v = labels[b,0,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndimension() < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
    assert sh[dim] == 1, 'labels should have a channel with length equal to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class DiceLossNew(_Loss):
"""
    Compute average Dice loss between two tensors. It supports both multi-class and multi-label tasks.
    Input logits `input` (BNHW[D] where N is the number of classes) are compared with ground truth `target` (BNHW[D]).
    Axis N of `input` is expected to hold logit predictions for each class rather than image channels,
    while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
    values added to the intersection and union components of the intersection-over-union calculation to smooth
    results respectively; these values should be small. The `include_background` class attribute can be set to
    False for an instance of DiceLoss to exclude the first category (channel index 0), which is by convention
    assumed to be background. If the non-background segmentations are small compared to the total image size,
    they can get overwhelmed by the signal from the background, so excluding it in such cases helps convergence.
    Milletari, F. et al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction.
MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch:
'bool'=False) ->None:
"""
Args:
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, pass another callable to apply
                a different activation layer. Defaults to ``None``. For example:
                `other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
            batch: whether to sum the intersection and union areas over the batch dimension before dividing.
                Defaults to False; a Dice loss value is computed independently for each item in the batch
                before any `reduction`.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
self.squared_pred = squared_pred
self.jaccard = jaccard
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Irme/MONAI | DiceLoss | false | 17,438 | [
"Apache-2.0"
] | 3 | dc4bf661831b14f4231cb325cc1b15d38c1e406c | https://github.com/Irme/MONAI/tree/dc4bf661831b14f4231cb325cc1b15d38c1e406c |
BCEFocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/43/c43xwr2h7urzsv2w7tzhfvkdgwxv7jzign54ixo5gsvb6pwa4qal.py
# Topologically Sorted Source Nodes: [sub, pow_1, mul, mul_1, log, mul_2, pow_2, mul_3, sub_1, mul_4, sub_2, log_1, mul_5, loss, loss_1], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.log, aten.sub, aten.mean]
# Source node to ATen node mapping:
# log => log
# log_1 => log_1
# loss => sub_3
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -0.25), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg1_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 0.75), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %log_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {})
triton_per_fused_log_mean_mul_pow_rsub_sub_0 = async_compile.triton('triton_per_fused_log_mean_mul_pow_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log_mean_mul_pow_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_log_mean_mul_pow_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tmp2 * tmp2
tmp4 = -0.25
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp8 = tl_math.log(tmp0)
tmp9 = tmp7 * tmp8
tmp10 = tmp0 * tmp0
tmp11 = 0.75
tmp12 = tmp10 * tmp11
tmp13 = tmp1 - tmp6
tmp14 = tmp12 * tmp13
tmp15 = tl_math.log(tmp2)
tmp16 = tmp14 * tmp15
tmp17 = tmp9 - tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = 256.0
tmp22 = tmp20 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp22, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, mul, mul_1, log, mul_2, pow_2, mul_3, sub_1, mul_4, sub_2, log_1, mul_5, loss, loss_1], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.log, aten.sub, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_log_mean_mul_pow_rsub_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from sklearn import *
class BCEFocalLoss(torch.nn.Module):
"""
    Binary focal loss with a fixed alpha.
"""
def __init__(self, gamma=2, alpha=0.25, reduction='elementwise_mean'):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
def forward(self, _input, target):
pt = _input
alpha = self.alpha
loss = -alpha * (1 - pt) ** self.gamma * target * torch.log(pt) - (
1 - alpha) * pt ** self.gamma * (1 - target) * torch.log(1 - pt)
if self.reduction == 'elementwise_mean':
loss = torch.mean(loss)
elif self.reduction == 'sum':
loss = torch.sum(loss)
        elif self.reduction == 'pos':
            pos = target.sum()  # assumed: `pos` was undefined in the original
            loss = torch.sum(loss) / (2 * pos)
return loss
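def _demo_bce_focal_loss():
    # Hedged usage sketch, not part of the original source: `_input` must
    # already be probabilities in (0, 1), since the loss applies log(pt) and
    # log(1 - pt) directly rather than working on logits.
    criterion = BCEFocalLoss(gamma=2, alpha=0.25)
    probs = torch.rand(8).clamp(1e-4, 1 - 1e-4)
    targets = torch.randint(0, 2, (8,)).float()
    return criterion(probs, targets)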
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from sklearn import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
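# Fused focal loss: mean over all 256 elements of
# -alpha * (1 - p)**2 * t * log(p) - (1 - alpha) * p**2 * (1 - t) * log(1 - p),
# with alpha = 0.25 folded into the literals -0.25 and 0.75.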
@triton.jit
def triton_per_fused_log_mean_mul_pow_rsub_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tmp2 * tmp2
tmp4 = -0.25
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp8 = tl_math.log(tmp0)
tmp9 = tmp7 * tmp8
tmp10 = tmp0 * tmp0
tmp11 = 0.75
tmp12 = tmp10 * tmp11
tmp13 = tmp1 - tmp6
tmp14 = tmp12 * tmp13
tmp15 = tl_math.log(tmp2)
tmp16 = tmp14 * tmp15
tmp17 = tmp9 - tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = 256.0
tmp22 = tmp20 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_log_mean_mul_pow_rsub_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCEFocalLossNew(torch.nn.Module):
"""
    Binary focal loss with a fixed alpha.
"""
def __init__(self, gamma=2, alpha=0.25, reduction='elementwise_mean'):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| CityU-AIM-Group/SIGMA | BCEFocalLoss | false | 17,439 | [
"MIT"
] | 5 | 19f89777db8d42f750a9b87756d3326c7efd18f5 | https://github.com/CityU-AIM-Group/SIGMA/tree/19f89777db8d42f750a9b87756d3326c7efd18f5 |
BiAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/p3/cp3czttlewk3fung3tr6hkantwkqxlbdlcxdpi3soj6ohrhnznjg.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul]
# Source node to ATen node mapping:
# _weight_norm => div, mul, pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_4, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div), kwargs = {})
triton_per_fused_div_mul_norm_0 = async_compile.triton('triton_per_fused_div_mul_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp9 = tmp8 / tmp6
tmp10 = tmp0 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp10, rmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/jj/cjjxujzilwzs63vlnoui44yzylbl3qpam5vrrt2yua4ftvwjx7x3.py
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# logits => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 768)
x5 = xindex % 192
x0 = xindex % 12
x2 = (xindex // 48) % 4
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x5 + (192*x4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + (12*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x7), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/nt/cnt6vlwxxxwvrhbuhwqnztgzjgngjwto2tj6sy5pg5oa45s2iivi.py
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# logits => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 12
x3 = (xindex // 192) % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (12*x0) + (48*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x5), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pd/cpddah45qonzxpt32uirthqgavu3xqofenznrfy7rvmc4weq7uj4.py
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# atten => amax, div_2, exp, sub, sum_3
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_7, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_7, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_3), kwargs = {})
triton_per_fused__softmax_3 = async_compile.triton('triton_per_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_3(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp8 / tmp12
tl.store(out_ptr2 + (r2 + (16*x3)), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/nh/cnhibb242krhtudcghal7daj4luzsi2xgqn37mwwte6u22v5qxln.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (), ())
assert_size_stride(primals_7, (12, 4), (4, 1))
assert_size_stride(primals_8, (12, ), (1, ))
assert_size_stride(primals_9, (1, 4, 1, 12), (48, 12, 12, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_norm_0.run(buf1, primals_4, primals_3, buf2, 1, 48, grid=grid(1), stream=stream0)
buf3 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten.norm, aten.div, aten.mul]
triton_per_fused_div_mul_norm_0.run(buf5, primals_7, primals_6, buf6, 1, 48, grid=grid(1), stream=stream0)
buf7 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf6, (4, 12), (1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4, 12), (768, 192, 48, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf3, primals_9, buf8, 3072, grid=grid(3072), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 12, 4), (768, 192, 48, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf7, primals_8, buf9, 3072, grid=grid(3072), stream=stream0)
buf10 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (64, 4, 12), (48, 12, 1), 0), reinterpret_tensor(buf9, (64, 12, 4), (48, 4, 1), 0), out=buf10)
buf13 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten._softmax]
triton_per_fused__softmax_3.run(buf10, primals_10, buf13, 64, 16, grid=grid(64), stream=stream0)
del buf10
del primals_10
buf14 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_4.run(buf7, primals_8, buf14, 192, grid=grid(192), stream=stream0)
del buf7
del primals_8
return (reinterpret_tensor(buf13, (16, 4, 4, 4), (64, 16, 4, 1), 0), buf2, buf6, primals_3, primals_4, primals_6, primals_7, primals_9, buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf3, buf5, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf8, (64, 12, 4), (48, 1, 12), 0), reinterpret_tensor(buf9, (64, 4, 12), (48, 1, 4), 0), buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 4, 1, 12), (48, 12, 12, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision.transforms import functional as F
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
import torch.nn.modules.module
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
        # Compare against the normalized (lowercased) name so mixed-case
        # values like 'ReLU' also select an activation.
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class BiAttention(nn.Module):
def __init__(self, v_features, q_features, mid_features, glimpses, drop=0.0
):
super(BiAttention, self).__init__()
self.hidden_aug = 3
self.glimpses = glimpses
self.lin_v = FCNet(v_features, int(mid_features * self.hidden_aug),
activate='relu', drop=drop / 2.5)
self.lin_q = FCNet(q_features, int(mid_features * self.hidden_aug),
activate='relu', drop=drop / 2.5)
self.h_weight = nn.Parameter(torch.Tensor(1, glimpses, 1, int(
mid_features * self.hidden_aug)).normal_())
self.h_bias = nn.Parameter(torch.Tensor(1, glimpses, 1, 1).normal_())
self.drop = nn.Dropout(drop)
def forward(self, v, q):
"""
v = batch, num_obj, dim
q = batch, que_len, dim
"""
v_num = v.size(1)
q_num = q.size(1)
v_ = self.lin_v(v).unsqueeze(1)
q_ = self.lin_q(q).unsqueeze(1)
v_ = self.drop(v_)
h_ = v_ * self.h_weight
logits = torch.matmul(h_, q_.transpose(2, 3))
logits = logits + self.h_bias
atten = F.softmax(logits.view(-1, self.glimpses, v_num * q_num), 2)
return atten.view(-1, self.glimpses, v_num, q_num)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'v_features': 4, 'q_features': 4, 'mid_features': 4,
'glimpses': 4}]
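# Hedged usage sketch (not from the source repo): with the documented 3D
# inputs, BiAttention normalizes each glimpse's attention jointly over the
# num_obj * que_len grid, so every (batch, glimpse) slice sums to 1.
def _example_biattention_usage():
    att = BiAttention(v_features=4, q_features=4, mid_features=4, glimpses=4)
    v = torch.rand(2, 5, 4)  # (batch, num_obj, v_features)
    q = torch.rand(2, 3, 4)  # (batch, que_len, q_features)
    return att(v, q)  # (2, 4, 5, 3); sums to 1 over the flattened 5 * 3 grid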
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch.nn as nn
from torch.nn.utils import weight_norm
import torch.nn.modules.module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
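    # Fused weight_norm(dim=None): reduces sum(v ** 2) over all 48 elements of
    # weight_v (in_ptr0), stores norm = sqrt(...) via in_out_ptr0, then writes
    # weight_v * (weight_g / norm) to out_ptr0 (weight_g is the scalar in_ptr1).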
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0)
tmp7 = tl.load(in_ptr1 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp9 = tmp8 / tmp6
tmp10 = tmp0 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp10, rmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
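    # Fuses FCNet's ReLU on lin_v's output (in_ptr0, bias already applied by
    # the preceding addmm) with the h_weight multiply (in_ptr1), broadcasting
    # the result across the 4 glimpses for the batched matmul that follows.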
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 768
x5 = xindex % 192
x0 = xindex % 12
x2 = xindex // 48 % 4
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x5 + 192 * x4), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 12 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x7, tmp4, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
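    # Fuses lin_q's bias add (in_ptr1) and ReLU on its matmul output (in_ptr0)
    # while copying into the transposed (..., 12, 4) layout the bmm expects.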
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 12
x3 = xindex // 192 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 12 * x0 + 48 * x3), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_3(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
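    # Adds the per-glimpse h_bias (in_ptr1, indexed by x0 = glimpse) and
    # applies a numerically stable softmax over the 16 flattened attention
    # positions that each program owns.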
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp8 / tmp12
tl.store(out_ptr2 + (r2 + 16 * x3), tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
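    # Saves the ReLU gate (relu(x + bias) <= 0) for lin_q's output as a bool
    # mask so autograd's threshold_backward can zero those gradients later.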
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (), ())
assert_size_stride(primals_7, (12, 4), (4, 1))
assert_size_stride(primals_8, (12,), (1,))
assert_size_stride(primals_9, (1, 4, 1, 12), (48, 12, 12, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_4, primals_3,
buf2, 1, 48, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(buf2, (4, 12), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf5, primals_7, primals_6,
buf6, 1, 48, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(buf6, (4, 12), (1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4, 12), (768, 192, 48, 12, 1),
torch.float32)
triton_poi_fused_clone_1[grid(3072)](buf3, primals_9, buf8, 3072,
XBLOCK=128, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 12, 4), (768, 192, 48, 4, 1),
torch.float32)
triton_poi_fused_clone_2[grid(3072)](buf7, primals_8, buf9, 3072,
XBLOCK=256, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (64, 4, 12), (48, 12, 1
), 0), reinterpret_tensor(buf9, (64, 12, 4), (48, 4, 1), 0),
out=buf10)
buf13 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_3[grid(64)](buf10, primals_10, buf13, 64,
16, XBLOCK=32, num_warps=4, num_stages=1)
del buf10
del primals_10
buf14 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(192)](buf7,
primals_8, buf14, 192, XBLOCK=128, num_warps=4, num_stages=1)
del buf7
del primals_8
return (reinterpret_tensor(buf13, (16, 4, 4, 4), (64, 16, 4, 1), 0),
buf2, buf6, primals_3, primals_4, primals_6, primals_7, primals_9,
buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf3, buf5,
reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf13,
reinterpret_tensor(buf8, (64, 12, 4), (48, 1, 12), 0),
reinterpret_tensor(buf9, (64, 4, 12), (48, 1, 4), 0), buf14)
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
        # Compare against the normalized (lowercased) name so mixed-case
        # values like 'ReLU' also select an activation.
        if self.activate == 'relu':
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class BiAttentionNew(nn.Module):
def __init__(self, v_features, q_features, mid_features, glimpses, drop=0.0
):
super(BiAttentionNew, self).__init__()
self.hidden_aug = 3
self.glimpses = glimpses
self.lin_v = FCNet(v_features, int(mid_features * self.hidden_aug),
activate='relu', drop=drop / 2.5)
self.lin_q = FCNet(q_features, int(mid_features * self.hidden_aug),
activate='relu', drop=drop / 2.5)
self.h_weight = nn.Parameter(torch.Tensor(1, glimpses, 1, int(
mid_features * self.hidden_aug)).normal_())
self.h_bias = nn.Parameter(torch.Tensor(1, glimpses, 1, 1).normal_())
self.drop = nn.Dropout(drop)
def forward(self, input_0, input_1):
primals_9 = self.h_weight
primals_10 = self.h_bias
primals_5 = self.lin_v.lin.bias
primals_3 = self.lin_v.lin.weight_g
primals_4 = self.lin_v.lin.weight_v
primals_8 = self.lin_q.lin.bias
primals_6 = self.lin_q.lin.weight_g
primals_7 = self.lin_q.lin.weight_v
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
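# Hedged usage sketch (not from the source repo): call() asserts a 4D
# (4, 4, 4, 4) v and a 3D (4, 4, 4) q on cuda:0, and the module must be moved
# to the GPU first so its weight-norm parameters are CUDA tensors.
def _example_biattention_new_usage():
    att = BiAttentionNew(v_features=4, q_features=4, mid_features=4,
        glimpses=4).cuda()
    v = torch.rand(4, 4, 4, 4, device='cuda')
    q = torch.rand(4, 4, 4, device='cuda')
    return att(v, q)  # (16, 4, 4, 4) softmaxed attention maps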
| ChCh1999/RTPB | BiAttention | false | 17,440 | [
"MIT"
] | 8 | 1066a3bfe4fe1b41eff74fd152936880302a60a2 | https://github.com/ChCh1999/RTPB/tree/1066a3bfe4fe1b41eff74fd152936880302a60a2 |
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/fo/cfoxfrrqk37jfliole43m4okhcbcabc6n53dfc6aqjml2ayvvxya.py
# Topologically Sorted Source Nodes: [dimension, attn_2], Original ATen: [aten.sqrt, aten._softmax]
# Source node to ATen node mapping:
# attn_2 => exp
# dimension => full_default
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%full_default, 0), kwargs = {})
# %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
# %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, %where_self), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %full_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_sqrt_0 = async_compile.triton('triton_poi_fused__softmax_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp8 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = 0.0
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6 * tmp1
tmp21 = tmp19 / tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yh/cyhf6bhaqimi2pucos5fnrpvhrt4vuaetbxnooyr5pvgjt7s6fgo.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dimension, attn_2], Original ATen: [aten.sqrt, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_sqrt_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from typing import Optional
class ScaledDotProductAttention(nn.Module):
def __init__(self, dropout: 'Optional[float]'=None, scale: 'bool'=True):
super(ScaledDotProductAttention, self).__init__()
if dropout is not None:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = dropout
self.softmax = nn.Softmax(dim=2)
self.scale = scale
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.permute(0, 2, 1))
if self.scale:
dimension = torch.sqrt(torch.tensor(k.shape[-1]))
attn = attn / dimension
if mask is not None:
attn = attn.masked_fill(mask, -1000000000.0)
attn = self.softmax(attn)
if self.dropout is not None:
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {}]
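# Hedged reference check (not from the source repo; CPU eager is fine): the
# module computes softmax(q @ k^T / sqrt(d_k)) @ v with d_k = 4, so a
# hand-rolled version using scale 2.0 should reproduce both outputs.
def _example_sdpa_reference():
    q, k, v = (torch.rand(4, 4, 4) for _ in range(3))
    out, attn = ScaledDotProductAttention()(q, k, v)
    ref_attn = torch.softmax(torch.bmm(q, k.transpose(1, 2)) / 2.0, dim=2)
    assert torch.allclose(attn, ref_attn)
    assert torch.allclose(out, torch.bmm(ref_attn, v))
    return out, attn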
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
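    # Stage 1 of softmax with the sqrt(d_k) scale folded in: d_k = 4 gives
    # scale 2.0; the (scale >= 0 ? 1 : -1) select keeps the max-subtraction
    # rewrite sign-safe, and each element's row max spans 4 logits.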
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 2.0
tmp2 = 0.0
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6 * tmp1
tmp21 = tmp19 / tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
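    # Stage 2 of softmax: divide each exp() value by its row sum (4 columns).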
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_sqrt_0[grid(64)](buf0, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
def __init__(self, dropout: 'Optional[float]'=None, scale: 'bool'=True):
super(ScaledDotProductAttentionNew, self).__init__()
if dropout is not None:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = dropout
self.softmax = nn.Softmax(dim=2)
self.scale = scale
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
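# Hedged usage sketch (not from the source repo): the compiled path keeps both
# bmm calls as extern kernels and fuses scaling + softmax into the two Triton
# kernels above; call() asserts contiguous (4, 4, 4) float32 CUDA inputs.
def _example_sdpa_new_usage():
    q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
    out, attn = ScaledDotProductAttentionNew()(q, k, v)
    return out, attn  # attn rows sum to 1 along dim 2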
| IusztinPaul/yacht | ScaledDotProductAttention | false | 17,441 | [
"Apache-2.0"
] | 5 | c68ab7c66bde860bb91534c29e97772ba328adb5 | https://github.com/IusztinPaul/yacht/tree/c68ab7c66bde860bb91534c29e97772ba328adb5 |
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/nk/cnkqvnf65csnchzgcwfmzvir35zlp7mwibh6ixgyd2q5lxsz4ye6.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/56/c56setgywsq6grelphibiuqwm3o2wx6hltvac4jtl3mjvpggrk2n.py
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# res => gt
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vo/cvobgo2rkbxi5a23n5cdy3xwawdecd2z4cog4ffjpfkk54k2exsk.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %primals_2], 2), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 8, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + ((4*x1) + ((-4) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4f/c4fkzsoo2u32fxt2hzav5qkj7ufvqgsypkd7yvbkjakbadjpshe7.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, grid=grid(128), stream=stream0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf1, primals_4, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf2, buf1, primals_4, primals_2, buf3, 128, grid=grid(128), stream=stream0)
del primals_2
del primals_4
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf5, primals_6, primals_1, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_6
return (buf5, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, reinterpret_tensor(buf3, (16, 8), (8, 1), 0), primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.distributed
class ResBlock(nn.Module):
def __init__(self, feature_size, action_size):
super(ResBlock, self).__init__()
self.lin_1 = nn.Linear(feature_size + action_size, feature_size)
self.lin_2 = nn.Linear(feature_size + action_size, feature_size)
def forward(self, x, action):
res = nn.functional.leaky_relu(self.lin_1(torch.cat([x, action], 2)))
res = self.lin_2(torch.cat([res, action], 2))
return res + x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'feature_size': 4, 'action_size': 4}]
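# Illustrative usage sketch (added for clarity; not part of the original repo):
# the eager block computes lin_2(cat([leaky_relu(lin_1(cat([x, a], 2))), a], 2)) + x,
# which is what the fused Triton kernels above reproduce.
if __name__ == "__main__":
    block = ResBlock(feature_size=4, action_size=4)
    x, action = get_inputs()
    out = block(x, action)
    assert out.shape == (4, 4, 4)  # the residual add preserves x's shape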
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
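# Fused torch.cat([x, action], dim=2): lanes 0-3 of each row copy x, lanes 4-7 copy action.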
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
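# Stores only the sign mask (lin_1 output + bias > 0) as bool; the leaky_relu
# value itself is re-materialized from this mask in the next kernel.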
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
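# Re-applies leaky_relu (negative slope 0.01) from the stored mask, then
# concatenates [res, action] along the last dim.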
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp17 = tl.load(in_ptr3 + (4 * x1 + (-4 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x2, tmp18, xmask)
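# Fused epilogue of lin_2: adds the bias and the residual input x in place on the mm output.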
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf1, primals_4, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf2, buf1, primals_4, primals_2,
buf3, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_4
buf4 = buf1
del buf1
extern_kernels.mm(reinterpret_tensor(buf3, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
triton_poi_fused_add_3[grid(64)](buf5, primals_6, primals_1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_6
return buf5, reinterpret_tensor(buf0, (16, 8), (8, 1), 0
), buf2, reinterpret_tensor(buf3, (16, 8), (8, 1), 0), primals_5
class ResBlockNew(nn.Module):
def __init__(self, feature_size, action_size):
super(ResBlockNew, self).__init__()
self.lin_1 = nn.Linear(feature_size + action_size, feature_size)
self.lin_2 = nn.Linear(feature_size + action_size, feature_size)
def forward(self, input_0, input_1):
primals_3 = self.lin_1.weight
primals_4 = self.lin_1.bias
primals_5 = self.lin_2.weight
primals_6 = self.lin_2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| Improbable-AI/curiosity_baselines | ResBlock | false | 17,442 | ["MIT"] | 5 | 42dca92b2fb66c0790a72206bf48595d3b5b487f | https://github.com/Improbable-AI/curiosity_baselines/tree/42dca92b2fb66c0790a72206bf48595d3b5b487f |
ZReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/gr/cgr23yfez22r2hzi3gbjpfkbyvkepoy455oygkc27unggysxyhtv.py
# Topologically Sorted Source Nodes: [phase, setitem], Original ATen: [aten.atan2, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# phase => atan2
# setitem => full_default, index_put
# Graph fragment:
# %atan2 : [num_users=2] = call_function[target=torch.ops.aten.atan2.default](args = (%squeeze_1, %squeeze), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%atan2, [%ne], %full_default), kwargs = {})
triton_poi_fused_atan2_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_atan2_index_put_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_atan2_index_put_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_atan2_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp2 = libdevice.atan2(tmp0, tmp1)
tmp3 = tmp2 != tmp2
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xg/cxgvqpcvn2g4mtmawydkotfe3pmi7qw2fb77e6vb54nseu3skcpi.py
# Topologically Sorted Source Nodes: [phase_2, le, ge, tensor, output, tensor_1, output_1], Original ATen: [aten.cat, aten.le, aten.ge, aten.lift_fresh, aten.where]
# Source node to ATen node mapping:
# ge => ge
# le => le
# output => where
# output_1 => where_1
# phase_2 => clone
# tensor => full_default_1
# tensor_1 => full_default_2
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%clone, 1.5707963267948966), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%clone, 0.0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ge, %arg0_1, %full_default_1), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %where, %full_default_2), kwargs = {})
triton_poi_fused_cat_ge_le_lift_fresh_where_1 = async_compile.triton('triton_poi_fused_cat_ge_le_lift_fresh_where_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_ge_le_lift_fresh_where_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_ge_le_lift_fresh_where_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = 1.5707963267948966
tmp2 = tmp0 <= tmp1
tmp3 = 0.0
tmp4 = tmp0 >= tmp3
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.where(tmp2, tmp6, tmp3)
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 2), (32, 8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [phase, setitem], Original ATen: [aten.atan2, aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_atan2_index_put_lift_fresh_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [phase_2, le, ge, tensor, output, tensor_1, output_1], Original ATen: [aten.cat, aten.le, aten.ge, aten.lift_fresh, aten.where]
triton_poi_fused_cat_ge_le_lift_fresh_where_1.run(buf0, arg0_1, buf1, 128, grid=grid(128), stream=stream0)
del arg0_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 2), (32, 8, 2, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import numpy
import torch
import numpy as np
import torch.nn as nn
import numpy.matlib
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
class ZReLU(nn.Module):
def __init__(self, polar=False):
super(ZReLU, self).__init__()
self.polar = polar
def forward(self, input):
ndims = input.ndimension()
input_real = input.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
input_imag = input.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
if not self.polar:
_mag, phase = cylindricalToPolarConversion(input_real, input_imag)
else:
phase = input_imag
phase = phase.unsqueeze(-1)
phase = torch.cat([phase, phase], ndims - 1)
output = torch.where(phase >= 0.0, input, torch.tensor(0.0))
output = torch.where(phase <= np.pi / 2, output, torch.tensor(0.0))
return output
def get_inputs():
return [torch.rand([4, 4, 4, 2])]
def get_init_inputs():
return [[], {}]
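# Illustrative sketch (added for clarity; not part of the original repo):
# ZReLU passes a complex entry through only when its phase atan2(imag, real)
# lies in [0, pi/2], i.e. when both real and imaginary parts are non-negative.
if __name__ == "__main__":
    z = torch.tensor([[[[1.0, 1.0], [-1.0, 1.0], [1.0, -1.0], [-1.0, -1.0]]]])
    out = ZReLU()(z)
    print(out)  # only the (+, +) pair survives; the other three are zeroed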
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
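# Computes phase = atan2(imag, real) for each complex pair; the self-inequality
# test (tmp2 != tmp2) detects NaNs and replaces them with 0.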
@triton.jit
def triton_poi_fused_atan2_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp2 = libdevice.atan2(tmp0, tmp1)
tmp3 = tmp2 != tmp2
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tl.store(out_ptr0 + x0, tmp5, xmask)
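# Zeroes every input entry whose phase falls outside [0, pi/2] (1.5707963... is
# pi/2); x1 = xindex // 2 makes the real and imaginary lanes share one phase value.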
@triton.jit
def triton_poi_fused_cat_ge_le_lift_fresh_where_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x2, xmask)
tmp1 = 1.5707963267948966
tmp2 = tmp0 <= tmp1
tmp3 = 0.0
tmp4 = tmp0 >= tmp3
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.where(tmp2, tmp6, tmp3)
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 2), (32, 8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_atan2_index_put_lift_fresh_0[grid(64)](arg0_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused_cat_ge_le_lift_fresh_where_1[grid(128)](buf0,
arg0_1, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf0
return buf1,
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
class ZReLUNew(nn.Module):
def __init__(self, polar=False):
super(ZReLUNew, self).__init__()
self.polar = polar
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| HMS-CardiacMR/MyoMapNet-Myocardial-Parametric-Mapping | ZReLU | false | 17,443 | ["MIT"] | 4 | 1e2dee8d6d1f97722eba91618462537faf9efba7 | https://github.com/HMS-CardiacMR/MyoMapNet-Myocardial-Parametric-Mapping/tree/1e2dee8d6d1f97722eba91618462537faf9efba7 |
Dense | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4r/c4rklke4lmzomwifevvy35bxknpxm5xqk6edtk7oabepnldmarln.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3u/c3ufobh3o7dojvdf6laxv5mdezyzgoczilnh47pda5qeuwm3yn3k.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_5), kwargs = {})
# %relu_1 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3y/c3yspv323ozehvu3m3uqwecpepvfdiox4iackt62sggbbemrvkkx.py
# Topologically Sorted Source Nodes: [P], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# P => amax, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (7*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (7*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (7*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (7*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (7*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (7*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (7*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tmp0 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp1 - tmp12
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp3 - tmp12
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp5 - tmp12
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp7 - tmp12
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp9 - tmp12
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp11 - tmp12
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tl.store(out_ptr0 + (x0), tmp12, xmask)
tl.store(out_ptr1 + (x0), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7t/c7thtzylhueh4qduzo4lfsl2df7twxjbyau743sepoouuo6hc75i.py
# Topologically Sorted Source Nodes: [P], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# P => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 7)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/e4/ce4hbye4ssxampmb4gl4gwe7g5w35a3tvvfdcxiu6dhg2ldu7igd.py
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# v => tanh
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_tanh_4 = async_compile.triton('triton_poi_fused_tanh_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 42), (42, 1))
assert_size_stride(primals_2, (32, 42), (42, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (16, 32), (32, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (7, 16), (16, 1))
assert_size_stride(primals_7, (7, ), (1, ))
assert_size_stride(primals_8, (1, 16), (16, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (42, 32), (1, 42), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 128, grid=grid(128), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (32, 16), (1, 32), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (16, 7), (1, 16), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [P], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, buf6, 4, grid=grid(4), stream=stream0)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [P], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf7, buf5, buf6, 28, grid=grid(28), stream=stream0)
del buf5
buf8 = reinterpret_tensor(buf6, (4, 1), (1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (16, 1), (1, 16), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.tanh]
triton_poi_fused_tanh_4.run(buf9, primals_9, 4, grid=grid(4), stream=stream0)
del primals_9
return (buf7, buf9, primals_1, buf1, buf3, buf7, buf9, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 42), (42, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 42), (42, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((7, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((7, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as functions
class Dense(nn.Module):
def __init__(self):
super(Dense, self).__init__()
self.fc1 = nn.Linear(6 * 7, 32)
self.fc2 = nn.Linear(32, 16)
self.probhead = nn.Linear(16, 7)
self.valuehead = nn.Linear(16, 1)
self.soft = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
def forward(self, x):
x = x.view(-1, 6 * 7)
x = functions.relu(self.fc1(x))
x = functions.relu(self.fc2(x))
P = self.soft(self.probhead(x))
v = self.tanh(self.valuehead(x))
return P, v
def get_inputs():
return [torch.rand([4, 42])]
def get_init_inputs():
return [[], {}]
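# Illustrative sketch (added for clarity; not part of the original repo): the
# 6 * 7 = 42 inputs suggest a Connect-Four board; P is a softmax policy over
# the 7 columns and v a tanh-squashed value in (-1, 1).
if __name__ == "__main__":
    net = Dense()
    P, v = net(torch.rand(4, 42))
    assert P.shape == (4, 7) and v.shape == (4, 1)
    assert torch.allclose(P.sum(dim=1), torch.ones(4))  # policy rows sum to 1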
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
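# The two kernels below are fused bias-add + ReLU epilogues for the hidden
# layers; the matmuls themselves run through extern_kernels.mm in call().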
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
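# Softmax pass 1: per row of 7 logits, computes the row max and the sum of
# exp(logit - max) for a numerically stable softmax.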
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 7 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 7 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 7 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 7 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 7 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + 7 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + 7 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tmp0 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp1 - tmp12
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp3 - tmp12
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp5 - tmp12
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp7 - tmp12
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp9 - tmp12
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp11 - tmp12
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tl.store(out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr1 + x0, tmp32, xmask)
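# Softmax pass 2: normalizes each logit as exp(x - row_max) / row_sum, in place.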
@triton.jit
def triton_poi_fused__softmax_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 7
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x2, tmp5, xmask)
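# Value head epilogue: broadcasts the single scalar bias and applies tanh in place.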
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 42), (42, 1))
assert_size_stride(primals_2, (32, 42), (42, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (16, 32), (32, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (7, 16), (16, 1))
assert_size_stride(primals_7, (7,), (1,))
assert_size_stride(primals_8, (1, 16), (16, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (42, 32),
(1, 42), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(128)](buf1, primals_3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (32, 16), (1,
32), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(16, 7), (1, 16), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused__softmax_2[grid(4)](buf4, buf5, buf6, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf7 = buf4
del buf4
triton_poi_fused__softmax_3[grid(28)](buf7, buf5, buf6, 28, XBLOCK=
32, num_warps=1, num_stages=1)
del buf5
buf8 = reinterpret_tensor(buf6, (4, 1), (1, 1), 0)
del buf6
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (16, 1), (1,
16), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_tanh_4[grid(4)](buf9, primals_9, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_9
return (buf7, buf9, primals_1, buf1, buf3, buf7, buf9, primals_8,
primals_6, primals_4)
class DenseNew(nn.Module):
def __init__(self):
super(DenseNew, self).__init__()
self.fc1 = nn.Linear(6 * 7, 32)
self.fc2 = nn.Linear(32, 16)
self.probhead = nn.Linear(16, 7)
self.valuehead = nn.Linear(16, 1)
self.soft = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.probhead.weight
primals_7 = self.probhead.bias
primals_8 = self.valuehead.weight
primals_9 = self.valuehead.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| IvLabs/model-based-RL | Dense | false | 17,444 | ["MIT"] | 7 | 8d22eabf7bf2601629015ef6c869e3850c306d6f | https://github.com/IvLabs/model-based-RL/tree/8d22eabf7bf2601629015ef6c869e3850c306d6f |
ActorCritic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/s6/cs6uj6qqbm27rlx4uou6t6cg76fhawo5c7ydsv7qn3oecfm2xd6q.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/iu/ciunlwlwigvbqgcklhexdjrvmqxms4okjyjzw2eyhyrkp3ecjlmh.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# mean => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/fr/cfrz3gjkrcgxdms6zw57tqw45ug4sq6bddofcwpy3m2wq5hk55zp.py
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.softplus]
# Source node to ATen node mapping:
# v => exp, gt, log1p, where
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_8,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_8, 20), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_8, %log1p), kwargs = {})
triton_poi_fused_softplus_2 = async_compile.triton('triton_poi_fused_softplus_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
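# Editor's sketch (added note, not part of the generated output): the kernel
# above is the numerically stable softplus that F.softplus uses with its
# default threshold of 20 -- beyond that point softplus(x) and x agree to
# float precision, so the linear branch skips the exp/log1p entirely.
# Eager reference:
def _softplus_reference(x, threshold=20.0):
    import torch
    return torch.where(x > threshold, x, torch.log1p(torch.exp(x)))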
# kernel path: runs/run_shard_2/inductor_cache/eb/cebbgcilbe77zuc6fc6gz5yw4kqwbt5wnazz4p5orl2tarwwryyc.py
# Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# sub => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%normal, %tanh), kwargs = {})
triton_poi_fused_sub_3 = async_compile.triton('triton_poi_fused_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tmp0 - tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vz/cvzswdjihqpj3ajv2pinqikvajbgzu4a4jeyonnwsfniyxudd4rv.py
# Topologically Sorted Source Nodes: [var, mul], Original ATen: [aten.pow, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# var => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand, 2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 2), kwargs = {})
triton_poi_fused_mul_pow_4 = async_compile.triton('triton_poi_fused_mul_pow_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_pow_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
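# Editor's sketch: this kernel broadcasts the (1, 4) per-action scale across
# the (4, 4, 4, 4) batch (note x0 = xindex % 4) and computes 2 * sigma**2,
# the denominator of the Gaussian exponent -(x - mu)^2 / (2 * sigma^2)
# consumed by the fused log-prob kernel below. Eager equivalent, with
# `scale` the softplus(std) buffer:
def _two_sigma_squared(scale):
    return scale.pow(2) * 2.0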
# kernel path: runs/run_shard_2/inductor_cache/ux/cuxiod3rj6glm2b2ecbunhsh2bwugl4wn5vy6txbvmflqelmi6bj.py
# Topologically Sorted Source Nodes: [log_scale, pow_2, neg, truediv, sub_1, log_prob, log_prob_1, entropy, entropy_1], Original ATen: [aten.log, aten.pow, aten.neg, aten.div, aten.sub, aten.sum, aten.add]
# Source node to ATen node mapping:
# entropy => add
# entropy_1 => sum_2
# log_prob => sub_2
# log_prob_1 => sum_1
# log_scale => log
# neg => neg
# pow_2 => pow_2
# sub_1 => sub_1
# truediv => div
# Graph fragment:
# %log : [num_users=2] = call_function[target=torch.ops.aten.log.default](args = (%expand,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %mul), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %log), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, 0.9189385332046727), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_2, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, 1.4189385332046727), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [1], True), kwargs = {})
triton_poi_fused_add_div_log_neg_pow_sub_sum_5 = async_compile.triton('triton_poi_fused_add_div_log_neg_pow_sub_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_neg_pow_sub_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_log_neg_pow_sub_sum_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x4 = xindex % 16
x0 = xindex % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr1 + (x4 + (64*x2)), xmask)
tmp5 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (16 + x4 + (64*x2)), xmask)
tmp13 = tl.load(in_ptr1 + (16 + x4 + (64*x2)), xmask)
tmp18 = tl.load(in_ptr0 + (32 + x4 + (64*x2)), xmask)
tmp21 = tl.load(in_ptr1 + (32 + x4 + (64*x2)), xmask)
tmp26 = tl.load(in_ptr0 + (48 + x4 + (64*x2)), xmask)
tmp29 = tl.load(in_ptr1 + (48 + x4 + (64*x2)), xmask)
tmp1 = tmp0 * tmp0
tmp2 = -tmp1
tmp4 = tmp2 / tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tmp8 = 0.9189385332046727
tmp9 = tmp7 - tmp8
tmp11 = tmp10 * tmp10
tmp12 = -tmp11
tmp14 = tmp12 / tmp13
tmp15 = tmp14 - tmp6
tmp16 = tmp15 - tmp8
tmp17 = tmp9 + tmp16
tmp19 = tmp18 * tmp18
tmp20 = -tmp19
tmp22 = tmp20 / tmp21
tmp23 = tmp22 - tmp6
tmp24 = tmp23 - tmp8
tmp25 = tmp17 + tmp24
tmp27 = tmp26 * tmp26
tmp28 = -tmp27
tmp30 = tmp28 / tmp29
tmp31 = tmp30 - tmp6
tmp32 = tmp31 - tmp8
tmp33 = tmp25 + tmp32
tmp34 = 1.4189385332046727
tmp35 = tmp6 + tmp34
tmp36 = tmp35 + tmp35
tmp37 = tmp36 + tmp35
tmp38 = tmp37 + tmp35
tl.store(out_ptr0 + (x5), tmp33, xmask)
tl.store(out_ptr1 + (x5), tmp38, xmask)
''', device_str='cuda')
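# Editor's note: the two constants above are 0.9189385332046727 = 0.5*ln(2*pi)
# (the Normal log-density normalizer) and 1.4189385332046727 = 0.5 +
# 0.5*ln(2*pi) (the per-dimension Normal entropy minus log sigma); the
# repeated adds tmp36..tmp38 are the dim-1 sum, whose four terms coincide
# because the scale varies only along the last axis. A small numeric check
# against torch.distributions.Normal (editor's sketch):
def _check_normal_constants():
    import torch
    mu = torch.zeros(3)
    sigma = torch.full((3,), 0.5)
    x = torch.randn(3)
    dist = torch.distributions.Normal(mu, sigma)
    log_p = -(x - mu) ** 2 / (2 * sigma ** 2) - sigma.log() - 0.9189385332046727
    entropy = sigma.log() + 1.4189385332046727
    assert torch.allclose(log_p, dist.log_prob(x), atol=1e-6)
    assert torch.allclose(entropy, dist.entropy(), atol=1e-6)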
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, 64), (64, 1))
assert_size_stride(primals_10, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse
buf16 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf16, 4096, grid=grid(4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf15, 4096, grid=grid(4096), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.softplus]
triton_poi_fused_softplus_2.run(primals_8, buf6, 4, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [mean, action], Original ATen: [aten.tanh, aten.normal]
buf7 = torch.ops.aten.normal.Tensor_Tensor(buf5, reinterpret_tensor(buf6, (4, 4, 4, 4), (0, 0, 0, 1), 0))
buf8 = buf7
del buf7
buf9 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub]
triton_poi_fused_sub_3.run(buf9, buf8, 256, grid=grid(256), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [var, mul], Original ATen: [aten.pow, aten.mul]
triton_poi_fused_mul_pow_4.run(buf6, buf10, 256, grid=grid(256), stream=stream0)
buf11 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_scale, pow_2, neg, truediv, sub_1, log_prob, log_prob_1, entropy, entropy_1], Original ATen: [aten.log, aten.pow, aten.neg, aten.div, aten.sub, aten.sum, aten.add]
triton_poi_fused_add_div_log_neg_pow_sub_sum_5.run(buf9, buf10, buf6, buf11, buf12, 64, grid=grid(64), stream=stream0)
del buf6
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_9, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf14)
del primals_10
return (buf8, buf11, buf12, reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 64), (64, 1), 0), buf4, buf9, buf10, primals_9, primals_6, buf15, primals_4, buf16, )
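# Editor's note (assumption about Inductor's convention, not stated in the
# source): the boolean buffers buf15/buf16 returned above hold the ReLU masks
# (activation <= 0) written by the fused relu/threshold_backward kernels;
# call() hands them back so the autograd backward pass can reuse them instead
# of recomputing. Sketch of the backward step they enable:
def _relu_backward_reference(grad_out, mask_le_zero):
    # zero the gradient wherever the forward ReLU output was clamped to 0
    return grad_out.masked_fill(mask_le_zero, 0.0)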
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ActorCritic(nn.Module):
""" Actor Critic neural network with shared body.
    The Actor maps states to actions, log_probs, and entropy.
The Critic maps states to values.
"""
def __init__(self, state_size, action_size, seed=0):
""" Initialize the neural net.
Params
======
state_size: dimension of each input state
action_size: dimension of each output
seed: random seed
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1_body = nn.Linear(state_size, 64)
self.fc2_body = nn.Linear(64, 64)
self.fc3_actor = nn.Linear(64, action_size)
self.std = nn.Parameter(torch.ones(1, action_size))
self.fc3_critic = nn.Linear(64, 1)
def forward(self, state, action=None):
x = torch.Tensor(state)
x = F.relu(self.fc1_body(x))
x = F.relu(self.fc2_body(x))
mean = torch.tanh(self.fc3_actor(x))
dist = torch.distributions.Normal(mean, F.softplus(self.std))
if action is None:
action = dist.sample()
log_prob = dist.log_prob(action)
log_prob = torch.sum(log_prob, dim=1, keepdim=True)
entropy = dist.entropy()
entropy = torch.sum(entropy, dim=1, keepdim=True)
value = self.fc3_critic(x)
return action, log_prob, entropy, value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
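# Editor's sketch (usage consistent with get_inputs()/get_init_inputs() above):
def _actor_critic_example():
    net = ActorCritic(state_size=4, action_size=4)
    action, log_prob, entropy, value = net(torch.rand(4, 4, 4, 4))
    # log_prob/entropy are summed over dim 1 with keepdim=True -> (4, 1, 4, 4);
    # value comes from the critic head on the last axis -> (4, 4, 4, 1)
    return action.shape, log_prob.shape, entropy.shape, value.shape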
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_softplus_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_sub_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tmp0 - tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_pow_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_div_log_neg_pow_sub_sum_5(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x4 = xindex % 16
x0 = xindex % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr1 + (x4 + 64 * x2), xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (16 + x4 + 64 * x2), xmask)
tmp13 = tl.load(in_ptr1 + (16 + x4 + 64 * x2), xmask)
tmp18 = tl.load(in_ptr0 + (32 + x4 + 64 * x2), xmask)
tmp21 = tl.load(in_ptr1 + (32 + x4 + 64 * x2), xmask)
tmp26 = tl.load(in_ptr0 + (48 + x4 + 64 * x2), xmask)
tmp29 = tl.load(in_ptr1 + (48 + x4 + 64 * x2), xmask)
tmp1 = tmp0 * tmp0
tmp2 = -tmp1
tmp4 = tmp2 / tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tmp8 = 0.9189385332046727
tmp9 = tmp7 - tmp8
tmp11 = tmp10 * tmp10
tmp12 = -tmp11
tmp14 = tmp12 / tmp13
tmp15 = tmp14 - tmp6
tmp16 = tmp15 - tmp8
tmp17 = tmp9 + tmp16
tmp19 = tmp18 * tmp18
tmp20 = -tmp19
tmp22 = tmp20 / tmp21
tmp23 = tmp22 - tmp6
tmp24 = tmp23 - tmp8
tmp25 = tmp17 + tmp24
tmp27 = tmp26 * tmp26
tmp28 = -tmp27
tmp30 = tmp28 / tmp29
tmp31 = tmp30 - tmp6
tmp32 = tmp31 - tmp8
tmp33 = tmp25 + tmp32
tmp34 = 1.4189385332046727
tmp35 = tmp6 + tmp34
tmp36 = tmp35 + tmp35
tmp37 = tmp36 + tmp35
tmp38 = tmp37 + tmp35
tl.store(out_ptr0 + x5, tmp33, xmask)
tl.store(out_ptr1 + x5, tmp38, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, 64), (64, 1))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf16 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_3, buf16, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
buf15 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.bool)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3,
primals_5, buf15, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
triton_poi_fused_softplus_2[grid(4)](primals_8, buf6, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf7 = torch.ops.aten.normal.Tensor_Tensor(buf5, reinterpret_tensor
(buf6, (4, 4, 4, 4), (0, 0, 0, 1), 0))
buf8 = buf7
del buf7
buf9 = buf5
del buf5
triton_poi_fused_sub_3[grid(256)](buf9, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_pow_4[grid(256)](buf6, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_log_neg_pow_sub_sum_5[grid(64)](buf9,
buf10, buf6, buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1
)
del buf6
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_10, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_9, (64, 1), (1, 64), 0),
alpha=1, beta=1, out=buf14)
del primals_10
return buf8, buf11, buf12, reinterpret_tensor(buf14, (4, 4, 4, 1), (16,
4, 1, 1), 0), primals_8, reinterpret_tensor(primals_1, (64, 4), (4,
1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0
), reinterpret_tensor(buf3, (64, 64), (64, 1), 0
), buf4, buf9, buf10, primals_9, primals_6, buf15, primals_4, buf16
class ActorCriticNew(nn.Module):
""" Actor Critic neural network with shared body.
    The Actor maps states to actions, log_probs, and entropy.
The Critic maps states to values.
"""
def __init__(self, state_size, action_size, seed=0):
""" Initialize the neural net.
Params
======
state_size: dimension of each input state
action_size: dimension of each output
seed: random seed
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1_body = nn.Linear(state_size, 64)
self.fc2_body = nn.Linear(64, 64)
self.fc3_actor = nn.Linear(64, action_size)
self.std = nn.Parameter(torch.ones(1, action_size))
self.fc3_critic = nn.Linear(64, 1)
def forward(self, input_0):
primals_8 = self.std
primals_2 = self.fc1_body.weight
primals_3 = self.fc1_body.bias
primals_4 = self.fc2_body.weight
primals_5 = self.fc2_body.bias
primals_6 = self.fc3_actor.weight
primals_7 = self.fc3_actor.bias
primals_9 = self.fc3_critic.weight
primals_10 = self.fc3_critic.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1], output[2], output[3]
| ImmanuelXIV/ppo-self-play | ActorCritic | false | 17,445 | ["MIT"] | 7 | 21c000492b2450628b5a506d4101b7b12e5755e0 | https://github.com/ImmanuelXIV/ppo-self-play/tree/21c000492b2450628b5a506d4101b7b12e5755e0 |
ResidualSequential | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/a7/ca7pdneypboobvtjlxb5wqw5hbsxdwkyrnmyptck6ghjmsapnstb.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 + tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
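# Editor's note: the accompanying get_init_inputs() constructs
# ResidualSequential() with no submodules, so the Sequential body is the
# identity and the residual out + x_ collapses to x + x -- exactly the
# single-load tmp0 + tmp0 fused above. Eager equivalent for the empty case:
def _empty_residual_reference(x):
    return x + x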
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.optim
import torch.nn as nn
import torch
import torch.nn.init
class ResidualSequential(nn.Sequential):
def __init__(self, *args):
super(ResidualSequential, self).__init__(*args)
def forward(self, x):
out = super(ResidualSequential, self).forward(x)
x_ = None
if out.size(2) != x.size(2) or out.size(3) != x.size(3):
diff2 = x.size(2) - out.size(2)
diff3 = x.size(3) - out.size(3)
            x_ = x[:, :, diff2 // 2:out.size(2) + diff2 // 2,
                diff3 // 2:out.size(3) + diff3 // 2]
else:
x_ = x
return out + x_
    def eval(self):
        # Guard against infinite recursion: self.modules() yields self first,
        # and self.eval() is this very override.
        for m in self.modules():
            if m is not self:
                m.eval()
        exit()  # kept from the source: this module aborts on eval
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
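# Editor's sketch of the center-crop branch in forward() above (assumption:
# the spatial size difference is even, so the integer // split is exact):
def _center_crop_reference(x, out):
    d2 = (x.size(2) - out.size(2)) // 2
    d3 = (x.size(3) - out.size(3)) // 2
    return x[:, :, d2:d2 + out.size(2), d3:d3 + out.size(3)]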
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.optim
import torch.nn as nn
import torch
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 + tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ResidualSequentialNew(nn.Sequential):
def __init__(self, *args):
super(ResidualSequentialNew, self).__init__(*args)
    def eval(self):
        # Guard against infinite recursion: self.modules() yields self first,
        # and self.eval() is this very override.
        for m in self.modules():
            if m is not self:
                m.eval()
        exit()  # kept from the source: this module aborts on eval
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Jay-Lewis/phase_retrieval | ResidualSequential | false | 17,446 | ["MIT"] | 4 | 799cef92852c53e62e2a548f605652923e979456 | https://github.com/Jay-Lewis/phase_retrieval/tree/799cef92852c53e62e2a548f605652923e979456 |
ConvLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/zo/czoefxnecwpok2nmimcnrjk6tumldvtx7xkzpmzgmnay3tkwhjwg.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 8
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-2) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-2) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
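# Editor's note: the load index above, 15 - |-3 + |-2 + x0|| - 4*|-3 + |-2 + x1||
# + 16*x2, is the closed form of 2-pixel reflection padding on a 4x4 map:
# each output coordinate o in [0, 8) folds back to an input coordinate by
# mirroring at both edges. Scalar sketch of the per-axis mapping (assumed to
# match nn.ReflectionPad2d semantics):
def _reflect_index(o, size=4, pad=2):
    i = abs(o - pad)                          # mirror at the low edge
    return (size - 1) - abs((size - 1) - i)   # mirror at the high edge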
# kernel path: runs/run_shard_2/inductor_cache/7x/c7xksf46hekvm7qrrnxlo34nyt2yrkxxbfiyydgamwacppezvp4w.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 25) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 400, grid=grid(400), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1}]
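# Editor's note: with these init values, reflection_padding = 4 // 2 = 2, so a
# (4, 4, 4, 4) input pads to (4, 4, 8, 8) and the stride-1 4x4 convolution
# yields spatial size (8 - 4) // 1 + 1 = 5 -- matching the
# assert_size_stride(buf1, (4, 4, 5, 5), ...) check in the compiled call().
def _conv_out_size(padded, kernel, stride=1):
    return (padded - kernel) // stride + 1  # _conv_out_size(8, 4) == 5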
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 25 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(400)](buf2, primals_3, 400,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class ConvLayerNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayerNew, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, input_0):
        # call() reflection-pads primals_1 and convolves with primals_2 as the
        # weight, so the input must map to primals_1 and the weight to primals_2
        primals_2 = self.conv2d.weight
        primals_3 = self.conv2d.bias
        primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| JEF1056/Reconstruction-Style | ConvLayer | false | 17,447 | ["MIT"] | 6 | 3430d9e9f05c6980ae251cf15b619148a2c899d6 | https://github.com/JEF1056/Reconstruction-Style/tree/3430d9e9f05c6980ae251cf15b619148a2c899d6 |
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/sy/csyfm37232zryo5jgkiou5qrdat6jk256xfejwmgc6z46hrtpc6c.py
# Topologically Sorted Source Nodes: [scores, log_probs, mul, sum_1, log_likelihood], Original ATen: [aten.add, aten._log_softmax, aten.mul, aten.sum, aten.mean]
# Source node to ATen node mapping:
# log_likelihood => mean
# log_probs => amax, exp, log, sub, sub_1, sum_1
# mul => mul
# scores => add
# sum_1 => sum_2
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_4), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {})
triton_per_fused__log_softmax_add_mean_mul_sum_0 = async_compile.triton('triton_per_fused__log_softmax_add_mean_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_add_mean_mul_sum_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_add_mean_mul_sum_0(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = (rindex // 16)
r4 = rindex % 16
r0 = rindex % 4
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r4 + (64*r2)), None)
tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r4 + (64*r2)), None)
tmp6 = tl.load(in_ptr0 + (32 + r4 + (64*r2)), None)
tmp9 = tl.load(in_ptr0 + (48 + r4 + (64*r2)), None)
tmp25 = tl.load(in_ptr2 + (r4 + (64*r2)), None)
tmp28 = tl.load(in_ptr2 + (16 + r4 + (64*r2)), None)
tmp32 = tl.load(in_ptr2 + (32 + r4 + (64*r2)), None)
tmp36 = tl.load(in_ptr2 + (48 + r4 + (64*r2)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp6 + tmp1
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp4 - tmp11
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp7 - tmp11
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp10 - tmp11
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tmp12 - tmp23
tmp26 = tmp24 * tmp25
tmp27 = tmp14 - tmp23
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tmp17 - tmp23
tmp33 = tmp31 * tmp32
tmp34 = tmp30 + tmp33
tmp35 = tmp20 - tmp23
tmp37 = tmp35 * tmp36
tmp38 = tmp34 + tmp37
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None)
''', device_str='cuda')
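# Editor's sketch: the kernel above is the standard max-subtraction
# stabilization of log-softmax, log_softmax(s) = (s - m) - log(sum(exp(s - m)))
# with m = max(s) over dim 1, exact because the shift cancels. Eager reference:
def _log_softmax_reference(scores, dim=1):
    m = scores.amax(dim=dim, keepdim=True)
    shifted = scores - m
    return shifted - shifted.exp().sum(dim=dim, keepdim=True).log()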
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [scores, log_probs, mul, sum_1, log_likelihood], Original ATen: [aten.add, aten._log_softmax, aten.mul, aten.sum, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused__log_softmax_add_mean_mul_sum_0.run(buf5, buf0, primals_3, primals_4, 1, 64, grid=grid(1), stream=stream0)
return (buf5, primals_3, primals_4, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, dim_encoding, vocab_size):
super().__init__()
self.E = nn.Embedding(dim_encoding, vocab_size)
self.b = nn.Parameter(torch.zeros(1, vocab_size))
def forward(self, Z, targets):
scores = Z @ self.E.weight + self.b
log_probs = scores.log_softmax(dim=1)
log_likelihood = (log_probs * targets).sum(1).mean()
return log_likelihood
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_encoding': 4, 'vocab_size': 4}]
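# Editor's sketch (usage consistent with get_inputs()/get_init_inputs() above):
def _decoder_example():
    import torch
    dec = Decoder(dim_encoding=4, vocab_size=4)
    Z, targets = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    # scalar: target-weighted log-probs summed over dim 1, then averaged
    return dec(Z, targets).item()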
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__log_softmax_add_mean_mul_sum_0(in_out_ptr1, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex // 16
r4 = rindex % 16
r0 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r4 + 64 * r2), None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r4 + 64 * r2), None)
tmp6 = tl.load(in_ptr0 + (32 + r4 + 64 * r2), None)
tmp9 = tl.load(in_ptr0 + (48 + r4 + 64 * r2), None)
tmp25 = tl.load(in_ptr2 + (r4 + 64 * r2), None)
tmp28 = tl.load(in_ptr2 + (16 + r4 + 64 * r2), None)
tmp32 = tl.load(in_ptr2 + (32 + r4 + 64 * r2), None)
tmp36 = tl.load(in_ptr2 + (48 + r4 + 64 * r2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp6 + tmp1
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp4 - tmp11
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp7 - tmp11
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp10 - tmp11
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tmp12 - tmp23
tmp26 = tmp24 * tmp25
tmp27 = tmp14 - tmp23
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tmp17 - tmp23
tmp33 = tmp31 * tmp32
tmp34 = tmp30 + tmp33
tmp35 = tmp20 - tmp23
tmp37 = tmp35 * tmp36
tmp38 = tmp34 + tmp37
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
get_raw_stream(0)
triton_per_fused__log_softmax_add_mean_mul_sum_0[grid(1)](buf5,
buf0, primals_3, primals_4, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
return buf5, primals_3, primals_4, buf0, reinterpret_tensor(primals_2,
(4, 64), (1, 4), 0)
class DecoderNew(nn.Module):
def __init__(self, dim_encoding, vocab_size):
super().__init__()
self.E = nn.Embedding(dim_encoding, vocab_size)
self.b = nn.Parameter(torch.zeros(1, vocab_size))
def forward(self, input_0, input_1):
primals_3 = self.b
primals_1 = self.E.weight
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| J-zin/SNUH | Decoder | false | 17,448 | ["MIT"] | 4 | e4bde66609e1480f890b8386046431d488b825bd | https://github.com/J-zin/SNUH/tree/e4bde66609e1480f890b8386046431d488b825bd |
Resample | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ss/css3tfouziwfbxdocm5xcwbjnkuvu5t5mh2kabiwtizoogyhpioo.py
# Topologically Sorted Source Nodes: [sigmoid, mul, x], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# x => mul_1
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, x], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from typing import Optional
class LinearStack(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int',
activation_fn: 'Optional[nn.Module]'=None, n: 'int'=1,
hidden_features: 'Optional[int]'=None, dropout: 'Optional[float]'=None
):
super().__init__()
if hidden_features is None or n == 1:
hidden_features = out_features
modules = []
for i in range(n):
if i == 0:
modules.append(nn.Linear(in_features=in_features,
out_features=hidden_features))
            elif i < n - 1:
modules.append(nn.Linear(in_features=hidden_features,
out_features=hidden_features))
else:
modules.append(nn.Linear(in_features=hidden_features,
out_features=out_features))
if activation_fn is not None:
modules.append(activation_fn())
if dropout is not None and dropout > 0:
modules.append(nn.Dropout(p=dropout))
self.net = nn.Sequential(*modules)
self.init_weights()
def init_weights(self):
for n, p in self.named_parameters():
if 'bias' in n:
torch.nn.init.zeros_(p)
elif 'weight' in n:
torch.nn.init.xavier_uniform_(p)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
return self.net(x)
class Resample(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int',
activation_fn: 'nn.Module', trainable_add: 'bool'=True, dropout:
'Optional[float]'=None):
super().__init__()
self.in_features = in_features
self.trainable_add = trainable_add
self.out_features = out_features
self.dropout = dropout
if self.in_features != self.out_features:
self.resample = LinearStack(in_features=self.in_features,
out_features=self.out_features, activation_fn=activation_fn,
dropout=self.dropout)
if self.trainable_add:
self.mask = nn.Parameter(torch.zeros(self.out_features, dtype=
torch.float))
self.gate = nn.Sigmoid()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
if self.in_features != self.out_features:
x = self.resample(x)
if self.trainable_add:
x = x * self.gate(self.mask) * 2.0
return x
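# Gate note: mask is initialized to zeros, so sigmoid(mask) * 2.0 == 1.0 and
# the module starts as an identity on each feature. Illustrative check
# (activation_fn is unused when in_features == out_features):
#   m = Resample(4, 4, activation_fn=None)
#   x = torch.rand(2, 4)
#   assert torch.allclose(m(x), x)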
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'activation_fn': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
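# Indexing note: xindex enumerates all 256 elements of the contiguous
# (4, 4, 4, 4) input and x0 = xindex % 4 picks the matching entry of the
# 4-element mask, so the per-feature gate sigmoid(mask) * 2.0 is broadcast
# across the three leading dimensions in a single elementwise pass.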
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](primals_2, primals_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class LinearStack(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int',
activation_fn: 'Optional[nn.Module]'=None, n: 'int'=1,
hidden_features: 'Optional[int]'=None, dropout: 'Optional[float]'=None
):
super().__init__()
if hidden_features is None or n == 1:
hidden_features = out_features
modules = []
for i in range(n):
if i == 0:
modules.append(nn.Linear(in_features=in_features,
out_features=hidden_features))
            elif i < n - 1:
modules.append(nn.Linear(in_features=hidden_features,
out_features=hidden_features))
else:
modules.append(nn.Linear(in_features=hidden_features,
out_features=out_features))
if activation_fn is not None:
modules.append(activation_fn())
if dropout is not None and dropout > 0:
modules.append(nn.Dropout(p=dropout))
self.net = nn.Sequential(*modules)
self.init_weights()
def init_weights(self):
for n, p in self.named_parameters():
if 'bias' in n:
torch.nn.init.zeros_(p)
elif 'weight' in n:
torch.nn.init.xavier_uniform_(p)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
return self.net(x)
class ResampleNew(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int',
activation_fn: 'nn.Module', trainable_add: 'bool'=True, dropout:
'Optional[float]'=None):
super().__init__()
self.in_features = in_features
self.trainable_add = trainable_add
self.out_features = out_features
self.dropout = dropout
if self.in_features != self.out_features:
self.resample = LinearStack(in_features=self.in_features,
out_features=self.out_features, activation_fn=activation_fn,
dropout=self.dropout)
if self.trainable_add:
self.mask = nn.Parameter(torch.zeros(self.out_features, dtype=
torch.float))
self.gate = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.mask
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
 | IusztinPaul/yacht | Resample | false | 17,449 | ["Apache-2.0"] | 5 | c68ab7c66bde860bb91534c29e97772ba328adb5 | https://github.com/IusztinPaul/yacht/tree/c68ab7c66bde860bb91534c29e97772ba328adb5 |
ResampleNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/h6/ch6q3izhba3lypwklax5sm2hsvlao3o3o2azj2rejicwwsi6ukxh.py
# Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm]
# Source node to ATen node mapping:
# mul => mul
# output => var_mean
# sigmoid => sigmoid
# x => mul_1
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%mul_1, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_mul_native_layer_norm_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_native_layer_norm_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_native_layer_norm_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tmp5 = 2.0
tmp6 = tmp4 * tmp5
tmp10 = tl.sigmoid(tmp9)
tmp11 = tmp7 * tmp10
tmp12 = tmp11 * tmp5
tmp13 = tmp6 + tmp12
tmp17 = tl.sigmoid(tmp16)
tmp18 = tmp14 * tmp17
tmp19 = tmp18 * tmp5
tmp20 = tmp13 + tmp19
tmp24 = tl.sigmoid(tmp23)
tmp25 = tmp21 * tmp24
tmp26 = tmp25 * tmp5
tmp27 = tmp20 + tmp26
tmp28 = 4.0
tmp29 = tmp27 / tmp28
tmp30 = tmp6 - tmp29
tmp31 = tmp30 * tmp30
tmp32 = tmp12 - tmp29
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tmp19 - tmp29
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp38 = tmp26 - tmp29
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp41 = tmp40 / tmp28
tl.store(out_ptr0 + (x0), tmp29, xmask)
tl.store(out_ptr1 + (x0), tmp41, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/o7/co7xodkamutibp3cphoefilechlj4niha2vkc2hioan2l3yb7rif.py
# Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm]
# Source node to ATen node mapping:
# mul => mul
# output => add, add_1, mul_2, mul_3, rsqrt, sub
# sigmoid => sigmoid
# x => mul_1
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %getitem_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_4), kwargs = {})
triton_poi_fused_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_native_layer_norm_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_native_layer_norm_sigmoid_0.run(primals_2, primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm]
triton_poi_fused_mul_native_layer_norm_sigmoid_1.run(primals_2, primals_1, buf0, buf1, primals_3, primals_4, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_4
return (buf2, primals_1, primals_2, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class LearnableInterpolation(nn.Module):
def __init__(self, input_size: 'int', output_size: 'int', trainable:
'bool'=False):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.trainable = trainable
self.lin = nn.Linear(in_features=self.input_size, out_features=self
.output_size)
self.init_weights()
if self.trainable:
self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=
torch.float32))
self.gate = nn.Sigmoid()
def init_weights(self):
for name, p in self.named_parameters():
if 'bias' not in name:
torch.nn.init.xavier_uniform_(p)
else:
torch.nn.init.zeros_(p)
def forward(self, x):
upsampled = self.lin(x)
if self.trainable:
upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
return upsampled
class TimeDistributedInterpolation(nn.Module):
def __init__(self, output_size: 'int', batch_first: 'bool'=False,
trainable: 'bool'=False):
super().__init__()
self.output_size = output_size
self.batch_first = batch_first
self.trainable = trainable
if self.trainable:
self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=
torch.float32))
self.gate = nn.Sigmoid()
def interpolate(self, x):
upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode=
'linear', align_corners=True).squeeze(1)
if self.trainable:
upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
return upsampled
def forward(self, x):
if len(x.size()) <= 2:
return self.interpolate(x)
x_reshape = x.contiguous().view(-1, x.size(-1))
y = self.interpolate(x_reshape)
if self.batch_first:
y = y.contiguous().view(x.size(0), -1, y.size(-1))
else:
y = y.view(-1, x.size(1), y.size(-1))
return y
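# Note: for inputs with more than two dimensions, forward() flattens to
# (batch * time, features), interpolates along the last axis only, and
# reshapes back, so every timestep is resampled identically.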
class ResampleNorm(nn.Module):
def __init__(self, input_size: 'int', output_size: 'int'=None,
trainable_add: 'bool'=True, residual_upsampling: 'str'=
'interpolation', drop_normalization: 'bool'=False):
super().__init__()
self.input_size = input_size
self.trainable_add = trainable_add
self.output_size = output_size or input_size
self.residual_upsampling = residual_upsampling
self.drop_normalization = drop_normalization
if self.input_size != self.output_size:
if self.residual_upsampling == 'interpolation':
self.resample = TimeDistributedInterpolation(self.
output_size, batch_first=True, trainable=False)
elif self.residual_upsampling == 'learnable':
self.resample = LearnableInterpolation(self.input_size,
self.output_size, trainable=False)
else:
raise RuntimeError(
f'Wrong residual_upsampling method: {self.residual_upsampling}'
)
if self.trainable_add:
self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=
torch.float))
self.gate = nn.Sigmoid()
if self.drop_normalization is False:
self.norm = nn.LayerNorm(self.output_size)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
if self.input_size != self.output_size:
x = self.resample(x)
if self.trainable_add:
x = x * self.gate(self.mask) * 2.0
if self.drop_normalization is False:
output = self.norm(x)
else:
output = x
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr1 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tmp5 = 2.0
tmp6 = tmp4 * tmp5
tmp10 = tl.sigmoid(tmp9)
tmp11 = tmp7 * tmp10
tmp12 = tmp11 * tmp5
tmp13 = tmp6 + tmp12
tmp17 = tl.sigmoid(tmp16)
tmp18 = tmp14 * tmp17
tmp19 = tmp18 * tmp5
tmp20 = tmp13 + tmp19
tmp24 = tl.sigmoid(tmp23)
tmp25 = tmp21 * tmp24
tmp26 = tmp25 * tmp5
tmp27 = tmp20 + tmp26
tmp28 = 4.0
tmp29 = tmp27 / tmp28
tmp30 = tmp6 - tmp29
tmp31 = tmp30 * tmp30
tmp32 = tmp12 - tmp29
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tmp19 - tmp29
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp38 = tmp26 - tmp29
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp41 = tmp40 / tmp28
tl.store(out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr1 + x0, tmp41, xmask)
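# Kernel 0 emits, per row of 4 features, the mean (tmp29) and the biased
# variance (tmp41, i.e. correction=0) of the gated values
# x * sigmoid(mask) * 2.0; kernel 1 below recomputes the gated values and
# applies the LayerNorm transform (x - mean) * rsqrt(var + 1e-05) * weight
# + bias.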
@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_native_layer_norm_sigmoid_0[grid(64)](primals_2,
primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_native_layer_norm_sigmoid_1[grid(256)](primals_2,
primals_1, buf0, buf1, primals_3, primals_4, buf2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf0
del buf1
del primals_4
return buf2, primals_1, primals_2, primals_3
class LearnableInterpolation(nn.Module):
def __init__(self, input_size: 'int', output_size: 'int', trainable:
'bool'=False):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.trainable = trainable
self.lin = nn.Linear(in_features=self.input_size, out_features=self
.output_size)
self.init_weights()
if self.trainable:
self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=
torch.float32))
self.gate = nn.Sigmoid()
def init_weights(self):
for name, p in self.named_parameters():
if 'bias' not in name:
torch.nn.init.xavier_uniform_(p)
else:
torch.nn.init.zeros_(p)
def forward(self, x):
upsampled = self.lin(x)
if self.trainable:
upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
return upsampled
class TimeDistributedInterpolation(nn.Module):
def __init__(self, output_size: 'int', batch_first: 'bool'=False,
trainable: 'bool'=False):
super().__init__()
self.output_size = output_size
self.batch_first = batch_first
self.trainable = trainable
if self.trainable:
self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=
torch.float32))
self.gate = nn.Sigmoid()
def interpolate(self, x):
upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode=
'linear', align_corners=True).squeeze(1)
if self.trainable:
upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
return upsampled
def forward(self, x):
if len(x.size()) <= 2:
return self.interpolate(x)
x_reshape = x.contiguous().view(-1, x.size(-1))
y = self.interpolate(x_reshape)
if self.batch_first:
y = y.contiguous().view(x.size(0), -1, y.size(-1))
else:
y = y.view(-1, x.size(1), y.size(-1))
return y
class ResampleNormNew(nn.Module):
def __init__(self, input_size: 'int', output_size: 'int'=None,
trainable_add: 'bool'=True, residual_upsampling: 'str'=
'interpolation', drop_normalization: 'bool'=False):
super().__init__()
self.input_size = input_size
self.trainable_add = trainable_add
self.output_size = output_size or input_size
self.residual_upsampling = residual_upsampling
self.drop_normalization = drop_normalization
if self.input_size != self.output_size:
if self.residual_upsampling == 'interpolation':
self.resample = TimeDistributedInterpolation(self.
output_size, batch_first=True, trainable=False)
elif self.residual_upsampling == 'learnable':
self.resample = LearnableInterpolation(self.input_size,
self.output_size, trainable=False)
else:
raise RuntimeError(
f'Wrong residual_upsampling method: {self.residual_upsampling}'
)
if self.trainable_add:
self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=
torch.float))
self.gate = nn.Sigmoid()
if self.drop_normalization is False:
self.norm = nn.LayerNorm(self.output_size)
def forward(self, input_0):
primals_1 = self.mask
primals_3 = self.norm.weight
primals_4 = self.norm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
 | IusztinPaul/yacht | ResampleNorm | false | 17,450 | ["Apache-2.0"] | 5 | c68ab7c66bde860bb91534c29e97772ba328adb5 | https://github.com/IusztinPaul/yacht/tree/c68ab7c66bde860bb91534c29e97772ba328adb5 |
IntervalObservationEncoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/p3/cp3vh5ge4dyj3r4smr5dvjovuocujri2yvhft44mt2rcrwo3tz5y.py
# Topologically Sorted Source Nodes: [interval_feature_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# interval_feature_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/fw/cfwlxyocz2jq2ugevuvg4kui3fdi3yaixqlfouef4cvo536rdgq6.py
# Topologically Sorted Source Nodes: [interval_feature_2, interval_feature_4], Original ATen: [aten.convolution, aten.mul]
# Source node to ATen node mapping:
# interval_feature_2 => convolution
# interval_feature_4 => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %primals_4), kwargs = {})
triton_poi_fused_convolution_mul_1 = async_compile.triton('triton_poi_fused_convolution_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 208
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 13) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
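# Fusion note: the convolution itself runs via extern_kernels.convolution
# with bias=None; the kernel above adds the per-channel bias (in_ptr0,
# selected by x1 = (xindex // 13) % 4) and multiplies by the 0-dim scalar
# weight (in_ptr1), writing both the biased activations (returned for the
# backward pass) and the scaled output.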
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [interval_feature_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [interval_feature_2], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 13), (52, 13, 1))
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 13), (52, 13, 1), torch.float32)
# Topologically Sorted Source Nodes: [interval_feature_2, interval_feature_4], Original ATen: [aten.convolution, aten.mul]
triton_poi_fused_convolution_mul_1.run(buf2, primals_3, primals_4, buf3, 208, grid=grid(208), stream=stream0)
del primals_3
return (buf3, primals_2, primals_4, reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class IntervalObservationEncoder(nn.Module):
def __init__(self, num_input_channel: 'int', num_output_channel: 'int',
kernel_size: 'int', initial_output_weight_value: 'float'):
super().__init__()
assert initial_output_weight_value <= 1
self.conv_1d = nn.Conv1d(in_channels=num_input_channel,
out_channels=num_output_channel, kernel_size=kernel_size, stride=1)
self.weight = nn.Parameter(torch.tensor(initial_output_weight_value
).type(torch.FloatTensor), requires_grad=True)
def forward(self, observation: 'torch.Tensor') ->torch.Tensor:
batch_size, _window_size, channel_size, _ = observation.shape
interval_feature = torch.transpose(observation, 1, 2)
interval_feature = interval_feature.reshape(batch_size,
channel_size, -1)
interval_feature = self.conv_1d(interval_feature)
interval_feature = torch.squeeze(interval_feature, dim=-1)
interval_feature = interval_feature * self.weight
return interval_feature
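# Shape walk-through under get_init_inputs() below (illustrative):
# observation (4, 4, 4, 4) -> transpose(1, 2) swaps window and channel ->
# reshape to (batch=4, channels=4, length=16) -> Conv1d(kernel_size=4,
# stride=1) gives length 16 - 4 + 1 = 13 -> squeeze(dim=-1) is a no-op
# since 13 != 1 -> output (4, 4, 13), scaled elementwise by the scalar
# weight.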
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_input_channel': 4, 'num_output_channel': 4,
'kernel_size': 4, 'initial_output_weight_value': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 208
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 13 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4,
16), (64, 16, 1), 0), primals_2, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (4, 4, 13), (52, 13, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 13), (52, 13, 1), torch.float32)
triton_poi_fused_convolution_mul_1[grid(208)](buf2, primals_3,
primals_4, buf3, 208, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf3, primals_2, primals_4, reinterpret_tensor(buf0, (4, 4, 16),
(64, 16, 1), 0), buf2
class IntervalObservationEncoderNew(nn.Module):
def __init__(self, num_input_channel: 'int', num_output_channel: 'int',
kernel_size: 'int', initial_output_weight_value: 'float'):
super().__init__()
assert initial_output_weight_value <= 1
self.conv_1d = nn.Conv1d(in_channels=num_input_channel,
out_channels=num_output_channel, kernel_size=kernel_size, stride=1)
self.weight = nn.Parameter(torch.tensor(initial_output_weight_value
).type(torch.FloatTensor), requires_grad=True)
def forward(self, input_0):
primals_4 = self.weight
primals_2 = self.conv_1d.weight
primals_3 = self.conv_1d.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
 | IusztinPaul/yacht | IntervalObservationEncoder | false | 17,451 | ["Apache-2.0"] | 5 | c68ab7c66bde860bb91534c29e97772ba328adb5 | https://github.com/IusztinPaul/yacht/tree/c68ab7c66bde860bb91534c29e97772ba328adb5 |
ComplexConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/pb/cpbwrrkesnpqaeykc3dadrokwwtsas7gc34am5tnsdi2zyii5zpr.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# output => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_4, %unsqueeze_5], -1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr2 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr3 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tmp7 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr4 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr5 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 1, 1), (4, 1, 1, 1))
buf4 = empty_strided_cuda((4, 1, 1, 2), (2, 2, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(buf0, primals_3, buf1, primals_5, buf2, buf3, buf4, 8, grid=grid(8), stream=stream0)
del buf0
del buf1
del buf2
del buf3
del primals_3
del primals_5
return (buf4, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 0), reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 1), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class ComplexConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, **kwargs):
super().__init__()
self.conv_re = nn.Conv2d(in_channel, out_channel, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=
groups, bias=bias, **kwargs)
self.conv_im = nn.Conv2d(in_channel, out_channel, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=
groups, bias=bias, **kwargs)
def forward(self, x):
real = self.conv_re(x[..., 0]) - self.conv_im(x[..., 1])
imaginary = self.conv_re(x[..., 1]) + self.conv_im(x[..., 0])
output = torch.stack((real, imaginary), dim=-1)
return output
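# The pairing follows complex multiplication: with x = a + ib and an
# effective kernel w = c + id, (a + ib) * (c + id) = (ac - bd) + i(ad + bc),
# hence real = conv_re(x_re) - conv_im(x_im) and
# imaginary = conv_re(x_im) + conv_im(x_re); the last tensor axis stores
# the (real, imaginary) pair.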
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr2 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp9 = tl.load(in_ptr3 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tmp7 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp17 = tl.load(in_ptr4 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr5 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
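# Branch note (my reading of the fused graph): x0 = xindex % 2 selects the
# output plane. The convolutions above run with bias=None and the biases
# are fused here: the x0 == 0 branch writes
# real = (conv_re(re) + b_re) - (conv_im(im) + b_im), and the x0 == 1
# branch writes imaginary = (conv_re(im) + b_re) + (conv_im(re) + b_im).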
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 0), primals_2, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 1, 1), (4, 1, 1, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 1), primals_4, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 1, 1), (4, 1, 1, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 1), primals_2, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 1, 1), (4, 1, 1, 1))
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 0), primals_4, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 1, 1), (4, 1, 1, 1))
buf4 = empty_strided_cuda((4, 1, 1, 2), (2, 2, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(8)](buf0, primals_3, buf1, primals_5,
buf2, buf3, buf4, 8, XBLOCK=8, num_warps=1, num_stages=1)
del buf0
del buf1
del buf2
del buf3
del primals_3
del primals_5
return buf4, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4,
4, 4), (256, 64, 16, 4), 0), reinterpret_tensor(primals_1, (1, 4, 4,
4), (256, 64, 16, 4), 1)
class ComplexConv2dNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, **kwargs):
super().__init__()
self.conv_re = nn.Conv2d(in_channel, out_channel, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=
groups, bias=bias, **kwargs)
self.conv_im = nn.Conv2d(in_channel, out_channel, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=
groups, bias=bias, **kwargs)
def forward(self, input_0):
        # match call(): primals_1 is the input tensor; primals_2/primals_3 are the
        # real conv's weight/bias and primals_4/primals_5 the imaginary conv's
        primals_1 = input_0
        primals_2 = self.conv_re.weight
        primals_3 = self.conv_re.bias
        primals_4 = self.conv_im.weight
        primals_5 = self.conv_im.bias
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
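# Usage sketch (an assumption, not part of the generated dump): the compiled wrapper
# is CUDA-only and hard-codes the (4, 4, 4, 4) shapes asserted in call(); the output
# stacks the real and imaginary convolution results along the last axis.
if torch.cuda.is_available():
    model = ComplexConv2dNew(in_channel=4, out_channel=4, kernel_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(model(x).shape)  # torch.Size([4, 1, 1, 2])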
| IIP-Sogang/Audio-Visual-Speech-Recognition | ComplexConv2d | false | 17,452 | [
"MIT"
] | 9 | bd03be91135acbc6162b83092d462b7fe71dd007 | https://github.com/IIP-Sogang/Audio-Visual-Speech-Recognition/tree/bd03be91135acbc6162b83092d462b7fe71dd007 |
SquashingCosine_Classifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/bd/cbdjk2fp76v6laobbjhntdqg75ppchyuouzyputlz4jehcivih74.py
# Topologically Sorted Source Nodes: [norm_x, add, truediv, truediv_1, ex, mul_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# ex => mul
# mul_1 => mul_1
# norm_x => pow_1, pow_2, sum_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=3] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, %add), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 16), kwargs = {})
triton_poi_fused_add_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 0.5
tmp13 = tmp11 + tmp12
tmp14 = tmp11 / tmp13
tmp16 = tmp15 / tmp11
tmp17 = tmp14 * tmp16
tmp18 = 16.0
tmp19 = tmp17 * tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5i/c5ihe4rxe4ytbhka5j7rafjwsglswfuumrqesywhmdin7hg4pbhz.py
# Topologically Sorted Source Nodes: [norm_1, ew], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# ew => div_2
# norm_1 => pow_3, pow_4, sum_2
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_4), kwargs = {})
triton_poi_fused_div_linalg_vector_norm_1 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_x, add, truediv, truediv_1, ex, mul_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_1, ew], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_poi_fused_div_linalg_vector_norm_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf1
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class SquashingCosine_Classifier(nn.Module):
def __init__(self, in_dims, out_dims, scale=16, margin=0.5, init_std=0.001
):
super(SquashingCosine_Classifier, self).__init__()
self.in_dims = in_dims
self.out_dims = out_dims
self.scale = scale
self.margin = margin
self.weight = Parameter(torch.Tensor(out_dims, in_dims),
requires_grad=True)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input, *args):
norm_x = torch.norm(input, 2, 1, keepdim=True)
ex = norm_x / (self.margin + norm_x) * (input / norm_x)
ew = self.weight / torch.norm(self.weight, 2, 1, keepdim=True)
return torch.mm(self.scale * ex, ew.t())
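# Numeric sanity sketch (an assumption, not part of the original source): the forward
# pass matches the closed form scale * ||x||/(margin + ||x||) * (x/||x||) @ (W/||W||)^T
# with the default scale=16 and margin=0.5.
clf = SquashingCosine_Classifier(4, 4)
x = torch.rand(4, 4)
nx = x.norm(dim=1, keepdim=True)
ew = clf.weight / clf.weight.norm(dim=1, keepdim=True)
ref = torch.mm(16 * (nx / (0.5 + nx)) * (x / nx), ew.t())
assert torch.allclose(clf(x), ref, atol=1e-6)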
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_dims': 4, 'out_dims': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 0.5
tmp13 = tmp11 + tmp12
tmp14 = tmp11 / tmp13
tmp16 = tmp15 / tmp11
tmp17 = tmp14 * tmp16
tmp18 = 16.0
tmp19 = tmp17 * tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_mul_0[grid(16)](primals_1,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_linalg_vector_norm_1[grid(16)](primals_2, buf1,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
del buf1
return buf2, primals_2, buf0
class SquashingCosine_ClassifierNew(nn.Module):
def __init__(self, in_dims, out_dims, scale=16, margin=0.5, init_std=0.001
):
super(SquashingCosine_ClassifierNew, self).__init__()
self.in_dims = in_dims
self.out_dims = out_dims
self.scale = scale
self.margin = margin
self.weight = Parameter(torch.Tensor(out_dims, in_dims),
requires_grad=True)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input_0):
        # match call(): primals_1 is the input that gets squashed by the first
        # Triton kernel; primals_2 is the weight that gets L2-normalized
        primals_1 = input_0
        primals_2 = self.weight
output = call([primals_1, primals_2])
return output[0]
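# Usage sketch (an assumption, not part of the generated dump): CUDA-only with the
# (4, 4) shapes asserted in call(); the first Triton kernel fuses the whole
# 16 * ||x||/(0.5 + ||x||) * x/||x|| squashing into one pass before the cuBLAS mm.
if torch.cuda.is_available():
    clf = SquashingCosine_ClassifierNew(in_dims=4, out_dims=4).cuda()
    print(clf(torch.rand(4, 4, device='cuda')).shape)  # torch.Size([4, 4])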
| JKozerawski/BLT | SquashingCosine_Classifier | false | 17,453 | [
"MIT"
] | 5 | 6f3a6f4dc3c832b62c4ac3f3baf34b6a0bd6e181 | https://github.com/JKozerawski/BLT/tree/6f3a6f4dc3c832b62c4ac3f3baf34b6a0bd6e181 |
FscoreMetric | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/qq/cqqvrvan4bzzsswuvaryovezc23z5pci6sqtkx7kyra74zq4un56.py
# Topologically Sorted Source Nodes: [gt, pr, mul, tp, mul_1, add, mul_2, sum_3, fn, mul_3, add_1, sum_2, fp, add_2, add_3, score], Original ATen: [aten.gt, aten._to_copy, aten.mul, aten.sum, aten.add, aten.sub, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# fn => sub_1
# fp => sub
# gt => gt
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pr => convert_element_type
# score => div
# sum_2 => sum_2
# sum_3 => sum_3
# tp => sum_1
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0.5), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %convert_element_type), kwargs = {})
# %sum_1 : [num_users=4] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-07), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg1_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, %sum_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_2, %sum_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %sub), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-07), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_3), kwargs = {})
triton_per_fused__to_copy_add_div_gt_mul_sub_sum_0 = async_compile.triton('triton_per_fused__to_copy_add_div_gt_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_gt_mul_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_div_gt_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = 0.5
tmp3 = tmp1 > tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp0, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tl.broadcast_to(tmp4, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 2.0
tmp16 = tmp8 * tmp15
tmp17 = 1e-07
tmp18 = tmp16 + tmp17
tmp19 = tmp11 - tmp8
tmp20 = 1.0
tmp21 = tmp19 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = tmp14 - tmp8
tmp24 = tmp22 + tmp23
tmp25 = tmp24 + tmp17
tmp26 = tmp18 / tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp26, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [gt, pr, mul, tp, mul_1, add, mul_2, sum_3, fn, mul_3, add_1, sum_2, fp, add_2, add_3, score], Original ATen: [aten.gt, aten._to_copy, aten.mul, aten.sum, aten.add, aten.sub, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_add_div_gt_mul_sub_sum_0.run(buf3, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def f_score(pr, gt, beta=1, eps=1e-07, threshold=0.5):
"""dice score(also referred to as F1-score)"""
if threshold is not None:
pr = (pr > threshold).float()
tp = torch.sum(gt * pr)
fp = torch.sum(pr) - tp
fn = torch.sum(gt) - tp
score = ((1 + beta ** 2) * tp + eps) / ((1 + beta ** 2) * tp + beta **
2 * fn + fp + eps)
return score
class FscoreMetric(nn.Module):
__name__ = 'f-score'
def __init__(self, beta=1, eps=1e-07, threshold=0.5):
super().__init__()
self.eps = eps
self.threshold = threshold
self.beta = beta
def forward(self, y_pr, y_gt):
return f_score(y_pr, y_gt, self.beta, self.eps, self.threshold)
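# Worked example (an assumption, not part of the original source): with threshold 0.5
# and beta == 1 the metric reduces to dice = (2*tp + eps) / (2*tp + fn + fp + eps).
metric = FscoreMetric()
gt = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
pr = torch.tensor([[0.9, 0.2, 0.4, 0.7]])  # binarized: [1, 0, 0, 1] -> tp=1, fp=1, fn=1
print(metric(pr, gt))  # ~ 2*1 / (2*1 + 1 + 1) = 0.5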
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_add_div_gt_mul_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 0.5
tmp3 = tmp1 > tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp0, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tl.broadcast_to(tmp4, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 2.0
tmp16 = tmp8 * tmp15
tmp17 = 1e-07
tmp18 = tmp16 + tmp17
tmp19 = tmp11 - tmp8
tmp20 = 1.0
tmp21 = tmp19 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = tmp14 - tmp8
tmp24 = tmp22 + tmp23
tmp25 = tmp24 + tmp17
tmp26 = tmp18 / tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_add_div_gt_mul_sub_sum_0[grid(1)](buf3,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
def f_score(pr, gt, beta=1, eps=1e-07, threshold=0.5):
"""dice score(also referred to as F1-score)"""
if threshold is not None:
pr = (pr > threshold).float()
tp = torch.sum(gt * pr)
fp = torch.sum(pr) - tp
fn = torch.sum(gt) - tp
score = ((1 + beta ** 2) * tp + eps) / ((1 + beta ** 2) * tp + beta **
2 * fn + fp + eps)
return score
class FscoreMetricNew(nn.Module):
__name__ = 'f-score'
def __init__(self, beta=1, eps=1e-07, threshold=0.5):
super().__init__()
self.eps = eps
self.threshold = threshold
self.beta = beta
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
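# Usage sketch (an assumption, not part of the generated dump): unlike the eager
# f_score above, this wrapper requires CUDA and the exact (4, 4, 4, 4) contiguous
# shapes asserted in call().
if torch.cuda.is_available():
    m = FscoreMetricNew()
    s = m(torch.rand(4, 4, 4, 4, device='cuda'), torch.rand(4, 4, 4, 4, device='cuda'))
    print(float(s))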
| JACKYLUO1991/HybridNet | FscoreMetric | false | 17,454 | [
"Apache-2.0"
] | 6 | eb97d8a048ca4bb4087bc542360172e169a08dbf | https://github.com/JACKYLUO1991/HybridNet/tree/eb97d8a048ca4bb4087bc542360172e169a08dbf |
MnistMLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/yx/cyxilln4jo2u7t7lrgrmhafkgxlpwskpp43lx4owvydetlrfon7f.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (500, 784), (784, 1))
assert_size_stride(primals_3, (500, ), (1, ))
assert_size_stride(primals_4, (10, 500), (500, 1))
assert_size_stride(primals_5, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 500), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 2000, grid=grid(2000), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (500, 10), (1, 500), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (buf2, primals_1, buf1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((500, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 500), (500, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
import torch.onnx
from torch.optim.lr_scheduler import *
class MnistMLP(nn.Module):
def __init__(self, hidden_size=500):
super(MnistMLP, self).__init__()
self.hidden_size = hidden_size
self.fc1 = nn.Linear(784, hidden_size)
self.fc2 = nn.Linear(hidden_size, 10)
def forward(self, x):
x = x.view(-1, 784)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
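# Usage sketch (an assumption, not part of the original source): view(-1, 784) flattens
# any leading dims, so a (2, 1, 28, 28) MNIST-shaped batch maps to (2, 784) -> (2, 10).
mlp = MnistMLP(hidden_size=500)
print(mlp(torch.rand(2, 1, 28, 28)).shape)  # torch.Size([2, 10])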
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.onnx
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (500, 784), (784, 1))
assert_size_stride(primals_3, (500,), (1,))
assert_size_stride(primals_4, (10, 500), (500, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
500), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(2000)](buf1, primals_3, 2000, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(500, 10), (1, 500), 0), alpha=1, beta=1, out=buf2)
del primals_5
return buf2, primals_1, buf1, primals_4
class MnistMLPNew(nn.Module):
def __init__(self, hidden_size=500):
super(MnistMLPNew, self).__init__()
self.hidden_size = hidden_size
self.fc1 = nn.Linear(784, hidden_size)
self.fc2 = nn.Linear(hidden_size, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| IST-DASLab/ACDC | MnistMLP | false | 17,455 | [
"Apache-2.0"
] | 6 | ac53210b6adc1f2506ff909de08172ed9cad25d5 | https://github.com/IST-DASLab/ACDC/tree/ac53210b6adc1f2506ff909de08172ed9cad25d5 |
SqueezeAndExcite | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/yg/cygooswl5gkxugqq2ejgag2vtcqhtumn2j3notsgzty3xoxbrq4v.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# y => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2, 3], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qy/cqy5s4xoujqirmjfiiqspuz3erkoc6o3ubu7jpl4cej473kypgdv.py
# Topologically Sorted Source Nodes: [conv2d, y_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# y_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/x7/cx7evvvm7te22h7xf3yh7pnjatqie5vy54vyorfffrtctztd4wn5.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vz/cvzlcxmuowtstgqrxzb5hcsechd32n3vjbzbpf457cjjvsojtkea.py
# Topologically Sorted Source Nodes: [y_2, y_3], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# y_2 => sigmoid
# y_3 => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 1, 1), (16, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d, y_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_2, y_3], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf5, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SqueezeAndExcite(nn.Module):
def __init__(self, channels, squeeze_channels, se_ratio):
super(SqueezeAndExcite, self).__init__()
        # float() keeps is_integer() available when se_ratio is an int
        # (int.is_integer only exists from Python 3.12)
        squeeze_channels = float(squeeze_channels * se_ratio)
        if not squeeze_channels.is_integer():
raise ValueError('channels must be divisible by 1 / ratio')
squeeze_channels = int(squeeze_channels)
self.se_reduce = nn.Conv2d(channels, squeeze_channels, 1, 1, 0,
bias=True)
self.non_linear1 = nn.ReLU(inplace=True)
self.se_expand = nn.Conv2d(squeeze_channels, channels, 1, 1, 0,
bias=True)
self.non_linear2 = nn.Sigmoid()
def forward(self, x):
y = torch.mean(x, (2, 3), keepdim=True)
y = self.non_linear1(self.se_reduce(y))
y = self.non_linear2(self.se_expand(y))
y = x * y
return y
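# Usage sketch (an assumption, not part of the original source): with the sample init
# inputs the bottleneck widens 4 -> 16 -> 4, and the output keeps the input shape,
# rescaled channel-wise by the sigmoid gates.
se = SqueezeAndExcite(channels=4, squeeze_channels=4, se_ratio=4.0)
print(se(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 4, 4])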
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'squeeze_channels': 4, 'se_ratio': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 1, 1), (16, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(64)](buf3, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5
class SqueezeAndExciteNew(nn.Module):
def __init__(self, channels, squeeze_channels, se_ratio):
super(SqueezeAndExciteNew, self).__init__()
        # float() keeps is_integer() available when se_ratio is an int
        # (int.is_integer only exists from Python 3.12)
        squeeze_channels = float(squeeze_channels * se_ratio)
        if not squeeze_channels.is_integer():
raise ValueError('channels must be divisible by 1 / ratio')
squeeze_channels = int(squeeze_channels)
self.se_reduce = nn.Conv2d(channels, squeeze_channels, 1, 1, 0,
bias=True)
self.non_linear1 = nn.ReLU(inplace=True)
self.se_expand = nn.Conv2d(squeeze_channels, channels, 1, 1, 0,
bias=True)
self.non_linear2 = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.se_reduce.weight
primals_3 = self.se_reduce.bias
primals_4 = self.se_expand.weight
primals_5 = self.se_expand.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
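# Usage sketch (an assumption, not part of the generated dump): mirrors the eager
# module above, but only for the CUDA shapes asserted in call().
if torch.cuda.is_available():
    se = SqueezeAndExciteNew(channels=4, squeeze_channels=4, se_ratio=4.0).cuda()
    print(se(torch.rand(4, 4, 4, 4, device='cuda')).shape)  # torch.Size([4, 4, 4, 4])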
| JACKYLUO1991/HybridNet | SqueezeAndExcite | false | 17,456 | [
"Apache-2.0"
] | 6 | eb97d8a048ca4bb4087bc542360172e169a08dbf | https://github.com/JACKYLUO1991/HybridNet/tree/eb97d8a048ca4bb4087bc542360172e169a08dbf |
ResForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/nk/cnkqvnf65csnchzgcwfmzvir35zlp7mwibh6ixgyd2q5lxsz4ye6.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/56/c56setgywsq6grelphibiuqwm3o2wx6hltvac4jtl3mjvpggrk2n.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vo/cvobgo2rkbxi5a23n5cdy3xwawdecd2z4cog4ffjpfkk54k2exsk.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %primals_2], 2), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 8, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + ((4*x1) + ((-4) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/eg/cegackn7bvyolprkpsm5ilfrsrowlsuvewff4ghjt5jnchv6nfzr.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.leaky_relu, aten.add]
# Source node to ATen node mapping:
# x => mul, where
# x_1 => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %where), kwargs = {})
triton_poi_fused_add_leaky_relu_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask).to(tl.int1)
tmp4 = tl.load(in_ptr3 + (x2), xmask)
tmp5 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp6 = tmp4 + tmp5
tmp7 = 0.01
tmp8 = tmp6 * tmp7
tmp9 = tl.where(tmp3, tmp6, tmp8)
tmp10 = tmp2 + tmp9
tl.store(out_ptr0 + (x0 + (8*x1)), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6b/c6bkw637b2rgvbdyx7nsin3z4x3yjv6og3wq2darcuuhtm6waqpd.py
# Topologically Sorted Source Nodes: [cat_3, cat_7], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# cat_7 => cat_7
# Graph fragment:
# %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add, %primals_2], 2), kwargs = {})
# %cat_7 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_2, %primals_2], 2), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask)
tl.store(out_ptr1 + (x0 + (8*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/gg/cggxihxdwvmdp3vsfke5mjz7d66x47pkixyiwsmrtsggb5fh7b4i.py
# Topologically Sorted Source Nodes: [cat_5], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_5 => cat_5
# Graph fragment:
# %cat_5 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_1, %primals_2], 2), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr2 + ((8*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr3 + ((4*x1) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/wk/cwkup6cnnudjdmh53i2yxtkv6lk6hiptcldreolpc4r7xv7aamzc.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_1
# x_3 => add_2
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, %add), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_13, %add_1), kwargs = {})
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x0 + (8*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp2 + tmp7
tl.store(out_ptr0 + (x0 + (8*x1)), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 8), (8, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 8), (8, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 8), (8, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 8), (8, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 8), (8, 1))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, 8), (8, 1))
assert_size_stride(primals_20, (4, ), (1, ))
assert_size_stride(primals_21, (4, 8), (8, 1))
assert_size_stride(primals_22, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, grid=grid(128), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf1, primals_4, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf2, buf1, primals_4, primals_2, buf3, 128, grid=grid(128), stream=stream0)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf4, primals_6, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf5, buf4, primals_6, primals_2, buf6, 128, grid=grid(128), stream=stream0)
del primals_6
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf6, (16, 8), (8, 1), 0), reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf7)
buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf8 = reinterpret_tensor(buf10, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.leaky_relu, aten.add]
triton_poi_fused_add_leaky_relu_3.run(buf7, primals_8, buf2, buf1, primals_4, buf8, 64, grid=grid(64), stream=stream0)
del primals_4
del primals_8
buf9 = reinterpret_tensor(buf10, (4, 4, 4), (32, 8, 1), 4) # alias
buf22 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf21 = reinterpret_tensor(buf22, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [cat_3, cat_7], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(primals_2, buf9, buf21, 64, grid=grid(64), stream=stream0)
buf11 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf10, (16, 8), (8, 1), 0), reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [res_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf11, primals_10, buf12, 64, grid=grid(64), stream=stream0)
buf13 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf12, buf11, primals_10, primals_2, buf13, 128, grid=grid(128), stream=stream0)
del primals_10
buf14 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf13, (16, 8), (8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf14)
buf15 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_5], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf14, primals_12, buf8, primals_2, buf15, 128, grid=grid(128), stream=stream0)
buf16 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf15, (16, 8), (8, 1), 0), reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf16)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [res_4], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf16, primals_14, buf17, 64, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_6], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf17, buf16, primals_14, primals_2, buf18, 128, grid=grid(128), stream=stream0)
del primals_14
buf19 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf18, (16, 8), (8, 1), 0), reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf19)
buf20 = reinterpret_tensor(buf22, (4, 4, 4), (32, 8, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf19, primals_16, buf14, primals_12, buf8, buf20, 64, grid=grid(64), stream=stream0)
del buf14
del primals_12
del primals_16
buf23 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf22, (16, 8), (8, 1), 0), reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf23)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [res_6], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf23, primals_18, buf24, 64, grid=grid(64), stream=stream0)
buf25 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_8], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf24, buf23, primals_18, primals_2, buf25, 128, grid=grid(128), stream=stream0)
del primals_18
buf26 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf25, (16, 8), (8, 1), 0), reinterpret_tensor(primals_19, (8, 4), (1, 8), 0), out=buf26)
buf27 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_9], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf26, primals_20, buf20, primals_2, buf27, 128, grid=grid(128), stream=stream0)
del primals_2
del primals_20
buf28 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_22, reinterpret_tensor(buf27, (16, 8), (8, 1), 0), reinterpret_tensor(primals_21, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf28)
del primals_22
return (reinterpret_tensor(buf28, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, reinterpret_tensor(buf3, (16, 8), (8, 1), 0), buf5, reinterpret_tensor(buf6, (16, 8), (8, 1), 0), reinterpret_tensor(buf10, (16, 8), (8, 1), 0), buf12, reinterpret_tensor(buf13, (16, 8), (8, 1), 0), reinterpret_tensor(buf15, (16, 8), (8, 1), 0), buf17, reinterpret_tensor(buf18, (16, 8), (8, 1), 0), reinterpret_tensor(buf22, (16, 8), (8, 1), 0), buf24, reinterpret_tensor(buf25, (16, 8), (8, 1), 0), reinterpret_tensor(buf27, (16, 8), (8, 1), 0), primals_21, primals_19, primals_17, primals_15, primals_13, primals_11, primals_9, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.distributed
class ResBlock(nn.Module):
def __init__(self, feature_size, action_size):
super(ResBlock, self).__init__()
self.lin_1 = nn.Linear(feature_size + action_size, feature_size)
self.lin_2 = nn.Linear(feature_size + action_size, feature_size)
def forward(self, x, action):
res = nn.functional.leaky_relu(self.lin_1(torch.cat([x, action], 2)))
res = self.lin_2(torch.cat([res, action], 2))
return res + x
class ResForward(nn.Module):
def __init__(self, feature_size, action_size):
super(ResForward, self).__init__()
self.lin_1 = nn.Linear(feature_size + action_size, feature_size)
self.res_block_1 = ResBlock(feature_size, action_size)
self.res_block_2 = ResBlock(feature_size, action_size)
self.res_block_3 = ResBlock(feature_size, action_size)
self.res_block_4 = ResBlock(feature_size, action_size)
self.lin_last = nn.Linear(feature_size + action_size, feature_size)
def forward(self, phi1, action):
x = nn.functional.leaky_relu(self.lin_1(torch.cat([phi1, action], 2)))
x = self.res_block_1(x, action)
x = self.res_block_2(x, action)
x = self.res_block_3(x, action)
x = self.res_block_4(x, action)
x = self.lin_last(torch.cat([x, action], 2))
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'feature_size': 4, 'action_size': 4}]
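# Minimal usage sketch (shapes taken from get_inputs/get_init_inputs above):
# model = ResForward(feature_size=4, action_size=4)
# phi1, action = get_inputs()
# pred = model(phi1, action)  # -> shape (4, 4, 4)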
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
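# Materializes torch.cat([phi1, action], dim=2): of each 8-wide output row, the
# first 4 lanes come from in_ptr0 and the last 4 from in_ptr1.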
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
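# Stores only the boolean mask (linear_out + bias) > 0; downstream kernels use
# it to recompute leaky_relu instead of materializing the activation here.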
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
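# Recomputes leaky_relu (slope 0.01) from the saved mask in the first half of
# each row and copies the action tensor into the second half, i.e. it
# materializes torch.cat([leaky_relu(lin(...)), action], dim=2) for the next
# matmul.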
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp17 = tl.load(in_ptr3 + (4 * x1 + (-4 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x2, tmp18, xmask)
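# ResBlock tail: adds lin_2's biased output to the recomputed leaky_relu input x
# and writes the sum with stride 8, directly into the first half of the next
# (aliased) cat buffer.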
@triton.jit
def triton_poi_fused_add_leaky_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask).to(tl.int1)
tmp4 = tl.load(in_ptr3 + x2, xmask)
tmp5 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp6 = tmp4 + tmp5
tmp7 = 0.01
tmp8 = tmp6 * tmp7
tmp9 = tl.where(tmp3, tmp6, tmp8)
tmp10 = tmp2 + tmp9
tl.store(out_ptr0 + (x0 + 8 * x1), tmp10, xmask)
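# Copies the same action tensor into the second halves of two cat buffers at
# once (cat_3 and cat_7 share this slice).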
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
tl.store(out_ptr1 + (x0 + 8 * x1), tmp0, xmask)
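# First half of each row: lin_2 output + bias + the previous residual (read with
# stride 8 from the aliased buffer); second half: the action tensor. Together
# this materializes torch.cat([res + x, action], dim=2).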
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr2 + (8 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp15 = tl.load(in_ptr3 + (4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
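# Fuses the two remaining residual adds (x_2 = lin_out + x_1, then
# x_3 = lin_out' + x_2) and stores x_3 with stride 8 into the first half of the
# final cat buffer.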
@triton.jit
def triton_poi_fused_add_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x0 + 8 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp2 + tmp7
tl.store(out_ptr0 + (x0 + 8 * x1), tmp8, xmask)
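# A rough equivalence sketch (illustrative, not part of the module) of the
# mask-based leaky_relu recomputation used throughout this schedule:
#   mask = v > 0                             # stored once as torch.bool
#   leaky = torch.where(mask, v, 0.01 * v)   # recomputed inside each consumer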
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 8), (8, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 8), (8, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 8), (8, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 8), (8, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 8), (8, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4, 8), (8, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (4, 8), (8, 1))
assert_size_stride(primals_22, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf1, primals_4, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf2, buf1, primals_4, primals_2,
buf3, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf4, primals_6, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf5, buf4, primals_6, primals_2,
buf6, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
buf7 = buf4
del buf4
extern_kernels.mm(reinterpret_tensor(buf6, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf7)
buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf8 = reinterpret_tensor(buf10, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_add_leaky_relu_3[grid(64)](buf7, primals_8, buf2,
buf1, primals_4, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
del primals_8
buf9 = reinterpret_tensor(buf10, (4, 4, 4), (32, 8, 1), 4)
buf22 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf21 = reinterpret_tensor(buf22, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_4[grid(64)](primals_2, buf9, buf21, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf11 = buf7
del buf7
extern_kernels.mm(reinterpret_tensor(buf10, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf11, primals_10, buf12,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf12, buf11, primals_10,
primals_2, buf13, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_10
buf14 = buf11
del buf11
extern_kernels.mm(reinterpret_tensor(buf13, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf14)
buf15 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_5[grid(128)](buf14, primals_12, buf8,
primals_2, buf15, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf16 = buf1
del buf1
extern_kernels.mm(reinterpret_tensor(buf15, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf16)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf16, primals_14, buf17,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf17, buf16, primals_14,
primals_2, buf18, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_14
buf19 = buf16
del buf16
extern_kernels.mm(reinterpret_tensor(buf18, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf19)
buf20 = reinterpret_tensor(buf22, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_add_6[grid(64)](buf19, primals_16, buf14,
primals_12, buf8, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf14
del primals_12
del primals_16
buf23 = buf19
del buf19
extern_kernels.mm(reinterpret_tensor(buf22, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_17, (8, 4), (1, 8), 0), out=buf23)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf23, primals_18, buf24,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf24, buf23, primals_18,
primals_2, buf25, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_18
buf26 = buf23
del buf23
extern_kernels.mm(reinterpret_tensor(buf25, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_19, (8, 4), (1, 8), 0), out=buf26)
buf27 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_5[grid(128)](buf26, primals_20, buf20,
primals_2, buf27, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_20
buf28 = buf26
del buf26
extern_kernels.addmm(primals_22, reinterpret_tensor(buf27, (16, 8),
(8, 1), 0), reinterpret_tensor(primals_21, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf28)
del primals_22
return (reinterpret_tensor(buf28, (4, 4, 4), (16, 4, 1), 0),
reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2,
reinterpret_tensor(buf3, (16, 8), (8, 1), 0), buf5,
reinterpret_tensor(buf6, (16, 8), (8, 1), 0), reinterpret_tensor(
buf10, (16, 8), (8, 1), 0), buf12, reinterpret_tensor(buf13, (16, 8
), (8, 1), 0), reinterpret_tensor(buf15, (16, 8), (8, 1), 0), buf17,
reinterpret_tensor(buf18, (16, 8), (8, 1), 0), reinterpret_tensor(
buf22, (16, 8), (8, 1), 0), buf24, reinterpret_tensor(buf25, (16, 8
), (8, 1), 0), reinterpret_tensor(buf27, (16, 8), (8, 1), 0),
primals_21, primals_19, primals_17, primals_15, primals_13,
primals_11, primals_9, primals_7, primals_5)
class ResBlock(nn.Module):
def __init__(self, feature_size, action_size):
super(ResBlock, self).__init__()
self.lin_1 = nn.Linear(feature_size + action_size, feature_size)
self.lin_2 = nn.Linear(feature_size + action_size, feature_size)
def forward(self, x, action):
res = nn.functional.leaky_relu(self.lin_1(torch.cat([x, action], 2)))
res = self.lin_2(torch.cat([res, action], 2))
return res + x
class ResForwardNew(nn.Module):
def __init__(self, feature_size, action_size):
super(ResForwardNew, self).__init__()
self.lin_1 = nn.Linear(feature_size + action_size, feature_size)
self.res_block_1 = ResBlock(feature_size, action_size)
self.res_block_2 = ResBlock(feature_size, action_size)
self.res_block_3 = ResBlock(feature_size, action_size)
self.res_block_4 = ResBlock(feature_size, action_size)
self.lin_last = nn.Linear(feature_size + action_size, feature_size)
def forward(self, input_0, input_1):
primals_3 = self.lin_1.weight
primals_4 = self.lin_1.bias
primals_5 = self.res_block_1.lin_1.weight
primals_6 = self.res_block_1.lin_1.bias
primals_7 = self.res_block_1.lin_2.weight
primals_8 = self.res_block_1.lin_2.bias
primals_9 = self.res_block_2.lin_1.weight
primals_10 = self.res_block_2.lin_1.bias
primals_11 = self.res_block_2.lin_2.weight
primals_12 = self.res_block_2.lin_2.bias
primals_13 = self.res_block_3.lin_1.weight
primals_14 = self.res_block_3.lin_1.bias
primals_15 = self.res_block_3.lin_2.weight
primals_16 = self.res_block_3.lin_2.bias
primals_17 = self.res_block_4.lin_1.weight
primals_18 = self.res_block_4.lin_1.bias
primals_19 = self.res_block_4.lin_2.weight
primals_20 = self.res_block_4.lin_2.bias
primals_21 = self.lin_last.weight
primals_22 = self.lin_last.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22])
return output[0]
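# Hypothetical smoke test (shapes from the asserts in call(); requires CUDA):
# m = ResForwardNew(feature_size=4, action_size=4).cuda()
# out = m(torch.rand(4, 4, 4, device='cuda'), torch.rand(4, 4, 4, device='cuda'))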
| Improbable-AI/curiosity_baselines | ResForward | false | 17,457 | [
"MIT"
] | 5 | 42dca92b2fb66c0790a72206bf48595d3b5b487f | https://github.com/Improbable-AI/curiosity_baselines/tree/42dca92b2fb66c0790a72206bf48595d3b5b487f |
Autoencoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/sm/csmenfx32ekzp3jy4hjjyfdm37wlwf77smzilpa5revmsd7esfwf.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/ox/coxzkq4kueijjjccjhdtvzhfquura4nj5lgusydc343b4la3telx.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view]
# Source node to ATen node mapping:
# x_2 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
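# Note: for these 4x4x4x4 shapes the extra index terms in triton_poi_fused_view_1
# (both the (x1 % 4) // 4 term and the // 16 term) evaluate to 0, so the kernel
# is effectively a contiguous copy that re-materializes the activation as a
# (64, 4) matmul input.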
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf12, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf4, primals_5, buf11, 256, grid=grid(256), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf7, primals_7, buf10, 256, grid=grid(256), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf7, (64, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_9
return (reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, buf8, primals_8, buf10, primals_6, buf11, primals_4, buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Autoencoder(nn.Module):
def __init__(self, input_dim, output_dim, n_hid, n_bottleneck):
super(Autoencoder, self).__init__()
self.fc1 = nn.Linear(input_dim, n_hid)
self.fc2 = nn.Linear(n_hid, n_bottleneck)
self.fc3 = nn.Linear(n_bottleneck, n_hid)
self.fc4 = nn.Linear(n_hid, output_dim)
self.act = nn.ReLU(inplace=True)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.fc2(x)
x = self.act(x)
x = self.fc3(x)
x = self.act(x)
y = self.fc4(x)
return y
def get_features(self, x):
return None
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'n_hid': 4,
'n_bottleneck': 4}]
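# A minimal eager-mode usage sketch (added for illustration; not part of the
# original repo). It relies only on the class and helpers defined above.
def _demo_autoencoder():
    init_args, init_kwargs = get_init_inputs()
    model = Autoencoder(*init_args, **init_kwargs)
    x = get_inputs()[0]
    y = model(x)
    # fc4 maps the hidden width back to output_dim, so shapes round-trip.
    assert y.shape == x.shape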
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf4,
primals_5, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0)
del buf4
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (4, 4), (1, 4
), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf7,
primals_7, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (64, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_9
return reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, buf8, primals_8, buf10, primals_6, buf11, primals_4, buf12
class AutoencoderNew(nn.Module):
def __init__(self, input_dim, output_dim, n_hid, n_bottleneck):
super(AutoencoderNew, self).__init__()
self.fc1 = nn.Linear(input_dim, n_hid)
self.fc2 = nn.Linear(n_hid, n_bottleneck)
self.fc3 = nn.Linear(n_bottleneck, n_hid)
self.fc4 = nn.Linear(n_hid, output_dim)
self.act = nn.ReLU(inplace=True)
def get_features(self, x):
return None
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
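# Hedged equivalence sketch (added for illustration; assumes a CUDA device,
# since call() dispatches the Triton kernels). The compiled module should
# match an eager ReLU-MLP built from the same weights.
def _check_autoencoder_equivalence():
    model = AutoencoderNew(4, 4, 4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = x
    for fc in (model.fc1, model.fc2, model.fc3):
        ref = torch.relu(fc(ref))
    ref = model.fc4(ref)
    assert torch.allclose(model(x), ref, atol=1e-5)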
| JavierAntoran/tiger-costume | Autoencoder | false | 17,458 | ["MIT"] | 10 | 975661dfab2c435281f74c6be86529b16881ebcb | https://github.com/JavierAntoran/tiger-costume/tree/975661dfab2c435281f74c6be86529b16881ebcb |
DQfDNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/v2/cv24vzifrszeh6ykktpm44yxw6ipfsqq636xos3qcxnfqk5ld5ed.py
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 30
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
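# Editorial note: this kernel fuses the bias add with the ReLU and, in the
# same pass, materializes the (relu_out <= 0) boolean mask that
# aten.threshold_backward later uses to zero incoming gradients wherever the
# ReLU was inactive. in_out_ptr0 is updated in place with the activations;
# out_ptr0 receives the mask.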
# kernel path: runs/run_shard_2/inductor_cache/4x/c4xd6y4gkp7z3srq6gzq52swaegpimvl35zpaduo4j5wyernpskh.py
# Topologically Sorted Source Nodes: [res], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# res => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/mi/cmibf5zezxd6g5fvwgrxm77t4io4cybzrauehr6ghekpfqjr2jwl.py
# Topologically Sorted Source Nodes: [res], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# res => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
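# Editorial note: _softmax_1 and _softmax_2 implement the usual two-pass,
# numerically stable softmax over dim=1 of the (4, 4, 4, 4) logits:
#   pass 1: exp(x - max(x, dim=1, keepdim=True))
#   pass 2: divide by the sum of those exponentials along dim=1
# The offsets 0/16/32/48 in the loads walk the four dim=1 slices at a fixed
# (batch, spatial) position, 16 and 64 being the dim-1 and batch strides.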
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (30, 4), (4, 1))
assert_size_stride(primals_2, (30, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (30, 30), (30, 1))
assert_size_stride(primals_5, (30, ), (1, ))
assert_size_stride(primals_6, (4, 30), (30, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 30), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 1920, grid=grid(1920), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 30), (30, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 30), (30, 1), 0), reinterpret_tensor(primals_4, (30, 30), (1, 30), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 30), (480, 120, 30, 1), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool)
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 1920, grid=grid(1920), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 30), (30, 1), 0), reinterpret_tensor(primals_6, (30, 4), (1, 30), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [res], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [res], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 30), (30, 1), 0), reinterpret_tensor(buf3, (64, 30), (30, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((30, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((30, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((30, 30), (30, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((30, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 30), (30, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class DQfDNetwork(nn.Module):
def __init__(self, in_size, out_size):
super(DQfDNetwork, self).__init__()
HIDDEN_SIZE = 30
self.f1 = nn.Linear(in_size, HIDDEN_SIZE)
self.f2 = nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE)
self.f3 = nn.Linear(HIDDEN_SIZE, out_size)
nn.init.xavier_uniform_(self.f1.weight)
nn.init.xavier_uniform_(self.f2.weight)
nn.init.xavier_uniform_(self.f3.weight)
self.opt = torch.optim.Adam(self.parameters())
self.loss = torch.nn.MSELoss()
def forward(self, x):
x1 = F.relu(self.f1(x))
x2 = F.relu(self.f2(x1))
x3 = self.f3(x2)
        res = F.softmax(x3, dim=1)  # explicit dim; the deprecated implicit dim resolves to 1 here, matching the compiled kernels
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'out_size': 4}]
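# A short eager-mode sketch (added for illustration; relies only on the
# definitions above). Because forward ends in a softmax over dim=1, each
# slice along that dimension should sum to 1.
def _demo_dqfd():
    init_args, init_kwargs = get_init_inputs()
    net = DQfDNetwork(*init_args, **init_kwargs)
    probs = net(get_inputs()[0])
    assert torch.allclose(probs.sum(dim=1), torch.ones(4, 4, 4), atol=1e-6)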
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 30
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (30, 4), (4, 1))
assert_size_stride(primals_2, (30,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (30, 30), (30, 1))
assert_size_stride(primals_5, (30,), (1,))
assert_size_stride(primals_6, (4, 30), (30, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 30), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1920)](buf1,
primals_2, buf8, 1920, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 30), (30, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 30), (30, 1), 0),
reinterpret_tensor(primals_4, (30, 30), (1, 30), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 30), (480, 120, 30, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(1920)](buf3,
primals_5, buf7, 1920, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 30),
(30, 1), 0), reinterpret_tensor(primals_6, (30, 4), (1, 30), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 30), (30, 1), 0), reinterpret_tensor(
buf3, (64, 30), (30, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class DQfDNetworkNew(nn.Module):
def __init__(self, in_size, out_size):
super(DQfDNetworkNew, self).__init__()
HIDDEN_SIZE = 30
self.f1 = nn.Linear(in_size, HIDDEN_SIZE)
self.f2 = nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE)
self.f3 = nn.Linear(HIDDEN_SIZE, out_size)
nn.init.xavier_uniform_(self.f1.weight)
nn.init.xavier_uniform_(self.f2.weight)
nn.init.xavier_uniform_(self.f3.weight)
self.opt = torch.optim.Adam(self.parameters())
self.loss = torch.nn.MSELoss()
def forward(self, input_0):
primals_1 = self.f1.weight
primals_2 = self.f1.bias
primals_4 = self.f2.weight
primals_5 = self.f2.bias
primals_6 = self.f3.weight
primals_7 = self.f3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| DPS0340/DQNDemo | DQfDNetwork | false | 17,459 | ["MIT"] | 8 | 5b57159ea8ff8a6b127cb18ff28da6696b40665b | https://github.com/DPS0340/DQNDemo/tree/5b57159ea8ff8a6b127cb18ff28da6696b40665b |
NeuralClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/i3/ci3g2zkf2ijbihvxbhgfiint2ko3m4p3f7cltm2s2w4bds76jlj4.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [sigmoid_1], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class NeuralClassifier(nn.Module):
def __init__(self, input_size, n_classes):
super(NeuralClassifier, self).__init__()
self.input_size = input_size
self.mapping1 = nn.Linear(input_size, input_size)
self.mapping2 = nn.Linear(input_size, n_classes)
self.f = torch.sigmoid
def forward(self, x):
x = self.f(self.mapping1(x))
return self.f(self.mapping2(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'n_classes': 4}]
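# A minimal eager sketch (added for illustration): the compiled graph above
# fuses each bias add into the sigmoid, i.e. both layers compute
# sigmoid(x @ W.T + b), which this reference reproduces exactly.
def _demo_neural_classifier():
    init_args, init_kwargs = get_init_inputs()
    clf = NeuralClassifier(*init_args, **init_kwargs)
    x = get_inputs()[0]
    ref = torch.sigmoid(clf.mapping2(torch.sigmoid(clf.mapping1(x))))
    assert torch.allclose(clf(x), ref)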
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_0[grid(256)](buf3, primals_5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_4
class NeuralClassifierNew(nn.Module):
def __init__(self, input_size, n_classes):
super(NeuralClassifierNew, self).__init__()
self.input_size = input_size
self.mapping1 = nn.Linear(input_size, input_size)
self.mapping2 = nn.Linear(input_size, n_classes)
self.f = torch.sigmoid
def forward(self, input_0):
primals_1 = self.mapping1.weight
primals_2 = self.mapping1.bias
primals_4 = self.mapping2.weight
primals_5 = self.mapping2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| JayWalker512/PacketGAN | NeuralClassifier | false | 17,460 | ["MIT"] | 5 | 93d4266ab9299c25ffd1f0aedf68fa4639f66572 | https://github.com/JayWalker512/PacketGAN/tree/93d4266ab9299c25ffd1f0aedf68fa4639f66572 |
Sinkhorn | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/iq/ciq7mmrzn6e2zgd3jfswpnffgdt7sbremdxqcjozgter4m4zifja.py
# Topologically Sorted Source Nodes: [mul, sum_2], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# sum_2 => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_5, %unsqueeze_6), kwargs = {})
# %sum_1 : [num_users=4] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {})
triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 64)
x0 = xindex % 16
x3 = xindex
tmp15 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 2, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp0 == tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tl.where(tmp6, tmp9, tmp11)
tmp13 = tl.where(tmp4, tmp9, tmp12)
tmp14 = tl.where(tmp2, tmp9, tmp13)
tmp16 = 0.0001
tmp17 = tmp15 + tmp16
tmp18 = tmp14 * tmp17
tmp20 = tmp19 + tmp16
tmp21 = tmp14 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp23 + tmp16
tmp25 = tmp14 * tmp24
tmp26 = tmp22 + tmp25
tmp28 = tmp27 + tmp16
tmp29 = tmp14 * tmp28
tmp30 = tmp26 + tmp29
tl.store(out_ptr0 + (x3), tmp30, xmask)
''', device_str='cuda')
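# Editorial note: the tl.where chain above materializes a per-batch
# select_scatter (compare x2 against each batch index in turn). At this first
# iteration every branch yields 1.0, so the masked multiply-accumulate
# reduces to a plain dim=1 sum of (s + 1e-4); the offsets 0/16/32/48 step
# through the four dim=1 slices at a fixed position.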
# kernel path: runs/run_shard_2/inductor_cache/gu/cgubzjux25dp5x4fi7dspqsukbwyckbp6nlulcysyno6mxhjhxhw.py
# Topologically Sorted Source Nodes: [tmp, s, truediv, truediv_1, truediv_2, truediv_3, s_1], Original ATen: [aten.zeros_like, aten.add, aten.reciprocal, aten.mul]
# Source node to ATen node mapping:
# s => add
# s_1 => mul_5
# tmp => full_default_10
# truediv => mul_1, reciprocal
# truediv_1 => mul_2, reciprocal_1
# truediv_2 => mul_3, reciprocal_2
# truediv_3 => mul_4, reciprocal_3
# Graph fragment:
# %full_default_10 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 0.0001), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_22,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1), kwargs = {})
# %select_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default_10, %mul_1, 0, 0), kwargs = {})
# %reciprocal_1 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_25,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_1, 1), kwargs = {})
# %select_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_4, %mul_2, 0, 1), kwargs = {})
# %reciprocal_2 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_29,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_2, 1), kwargs = {})
# %select_scatter_default_6 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %mul_3, 0, 2), kwargs = {})
# %reciprocal_3 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_33,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_3, 1), kwargs = {})
# %select_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_6, %mul_4, 0, 3), kwargs = {})
# %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %select_scatter_default_7), kwargs = {})
# %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %add), kwargs = {})
triton_poi_fused_add_mul_reciprocal_zeros_like_1 = async_compile.triton('triton_poi_fused_add_mul_reciprocal_zeros_like_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_reciprocal_zeros_like_1', 'mutated_arg_names': ['in_ptr0', 'out_ptr2'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_reciprocal_zeros_like_1(in_ptr0, in_ptr1, out_ptr0, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0001
tmp2 = tmp0 + tmp1
tmp3 = x1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp7 / tmp6
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tl.full([1], 2, tl.int32)
tmp12 = tmp3 == tmp11
tmp14 = tmp7 / tmp13
tmp15 = tmp14 * tmp9
tmp16 = tmp3 == tmp7
tmp18 = tmp7 / tmp17
tmp19 = tmp18 * tmp9
tmp20 = tl.full([1], 0, tl.int32)
tmp21 = tmp3 == tmp20
tmp23 = tmp7 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = 0.0
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp16, tmp19, tmp26)
tmp28 = tl.where(tmp12, tmp15, tmp27)
tmp29 = tl.where(tmp5, tmp10, tmp28)
tmp30 = tmp2 * tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
tl.store(out_ptr2 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/qi/cqinwcklfygettt3n4ryd5ly6pfkljav3ym2fvohwfbkio4mxwm7.py
# Topologically Sorted Source Nodes: [tmp_1, truediv_4, truediv_5, truediv_6, truediv_7], Original ATen: [aten.zeros_like, aten.reciprocal, aten.mul]
# Source node to ATen node mapping:
# tmp_1 => full_default_11
# truediv_4 => mul_7, reciprocal_4
# truediv_5 => mul_8, reciprocal_5
# truediv_6 => mul_9, reciprocal_6
# truediv_7 => mul_10, reciprocal_7
# Graph fragment:
# %full_default_11 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %reciprocal_4 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_37,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_4, 1), kwargs = {})
# %select_scatter_default_12 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default_11, %mul_7, 0, 0), kwargs = {})
# %reciprocal_5 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_40,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_5, 1), kwargs = {})
# %select_scatter_default_13 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_12, %mul_8, 0, 1), kwargs = {})
# %reciprocal_6 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_44,), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_6, 1), kwargs = {})
# %select_scatter_default_14 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_13, %mul_9, 0, 2), kwargs = {})
# %reciprocal_7 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_48,), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_7, 1), kwargs = {})
# %select_scatter_default_15 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_14, %mul_10, 0, 3), kwargs = {})
triton_poi_fused_mul_reciprocal_zeros_like_2 = async_compile.triton('triton_poi_fused_mul_reciprocal_zeros_like_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reciprocal_zeros_like_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reciprocal_zeros_like_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x0 = xindex % 4
x2 = (xindex // 16) % 4
x4 = xindex
tmp3 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (64 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (68 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr0 + (72 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (76 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr0 + (128 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr0 + (132 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp67 = tl.load(in_ptr0 + (136 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr0 + (140 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp77 = tl.load(in_ptr0 + (192 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp87 = tl.load(in_ptr0 + (196 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr0 + (200 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp93 = tl.load(in_ptr0 + (204 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp0 = x3
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tl.full([1], 2, tl.int32)
tmp7 = tmp1 == tmp6
tmp8 = tl.full([1], 1, tl.int32)
tmp9 = tmp1 == tmp8
tmp10 = tmp1 == tmp1
tmp11 = 1.0
tmp12 = 0.0
tmp13 = tl.where(tmp10, tmp11, tmp12)
tmp14 = tl.where(tmp9, tmp11, tmp13)
tmp15 = tl.where(tmp7, tmp11, tmp14)
tmp16 = tl.where(tmp5, tmp11, tmp15)
tmp17 = tmp3 * tmp16
tmp19 = tmp18 * tmp16
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp16
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp16
tmp26 = tmp23 + tmp25
tmp27 = tmp8 / tmp26
tmp28 = tmp27 * tmp11
tmp29 = tl.where(tmp2, tmp28, tmp12)
tmp30 = tmp0 == tmp8
tmp32 = tmp8 == tmp4
tmp33 = tmp8 == tmp6
tmp34 = tmp8 == tmp8
tmp35 = tmp8 == tmp1
tmp36 = tl.where(tmp35, tmp11, tmp12)
tmp37 = tl.where(tmp34, tmp11, tmp36)
tmp38 = tl.where(tmp33, tmp11, tmp37)
tmp39 = tl.where(tmp32, tmp11, tmp38)
tmp40 = tmp31 * tmp39
tmp42 = tmp41 * tmp39
tmp43 = tmp40 + tmp42
tmp45 = tmp44 * tmp39
tmp46 = tmp43 + tmp45
tmp48 = tmp47 * tmp39
tmp49 = tmp46 + tmp48
tmp50 = tmp8 / tmp49
tmp51 = tmp50 * tmp11
tmp52 = tl.where(tmp30, tmp51, tmp29)
tmp53 = tmp0 == tmp6
tmp55 = tmp6 == tmp4
tmp56 = tmp6 == tmp6
tmp57 = tmp6 == tmp8
tmp58 = tmp6 == tmp1
tmp59 = tl.where(tmp58, tmp11, tmp12)
tmp60 = tl.where(tmp57, tmp11, tmp59)
tmp61 = tl.where(tmp56, tmp11, tmp60)
tmp62 = tl.where(tmp55, tmp11, tmp61)
tmp63 = tmp54 * tmp62
tmp65 = tmp64 * tmp62
tmp66 = tmp63 + tmp65
tmp68 = tmp67 * tmp62
tmp69 = tmp66 + tmp68
tmp71 = tmp70 * tmp62
tmp72 = tmp69 + tmp71
tmp73 = tmp8 / tmp72
tmp74 = tmp73 * tmp11
tmp75 = tl.where(tmp53, tmp74, tmp52)
tmp76 = tmp0 == tmp4
tmp78 = tmp4 == tmp4
tmp79 = tmp4 == tmp6
tmp80 = tmp4 == tmp8
tmp81 = tmp4 == tmp1
tmp82 = tl.where(tmp81, tmp11, tmp12)
tmp83 = tl.where(tmp80, tmp11, tmp82)
tmp84 = tl.where(tmp79, tmp11, tmp83)
tmp85 = tl.where(tmp78, tmp11, tmp84)
tmp86 = tmp77 * tmp85
tmp88 = tmp87 * tmp85
tmp89 = tmp86 + tmp88
tmp91 = tmp90 * tmp85
tmp92 = tmp89 + tmp91
tmp94 = tmp93 * tmp85
tmp95 = tmp92 + tmp94
tmp96 = tmp8 / tmp95
tmp97 = tmp96 * tmp11
tmp98 = tl.where(tmp76, tmp97, tmp75)
tl.store(in_out_ptr0 + (x4), tmp98, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/va/cvafse3g2wbrxvbys75tqy6cmwpzbuz5njqxlgkxxamw755yks55.py
# Topologically Sorted Source Nodes: [mul_4, sum_6], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul_4 => mul_12
# sum_6 => sum_3
# Graph fragment:
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_14, %unsqueeze_12), kwargs = {})
# %sum_3 : [num_users=4] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_12, [2]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 64)
x0 = xindex % 16
x3 = xindex
tmp15 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 2, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp0 == tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tl.where(tmp6, tmp9, tmp11)
tmp13 = tl.where(tmp4, tmp9, tmp12)
tmp14 = tl.where(tmp2, tmp9, tmp13)
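    # Note: x2 = xindex // 64 only takes values 0..3 here, so the cascaded
    # selects always reduce tmp14 to 1.0; this appears to be the inlined block
    # mask (row/col_norm_ones) specialised to full 4x4 blocks.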
tmp17 = tmp15 * tmp16
tmp18 = tmp14 * tmp17
tmp21 = tmp19 * tmp20
tmp22 = tmp14 * tmp21
tmp23 = tmp18 + tmp22
tmp26 = tmp24 * tmp25
tmp27 = tmp14 * tmp26
tmp28 = tmp23 + tmp27
tmp31 = tmp29 * tmp30
tmp32 = tmp14 * tmp31
tmp33 = tmp28 + tmp32
tl.store(out_ptr0 + (x3), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/3g/c3gdjgorexccrin2qiyn3gjuaozz6cwedf5zslwoc3jxzgjqwhbj.py
# Topologically Sorted Source Nodes: [tmp_2, s_2, truediv_8, truediv_9, truediv_10, truediv_11, s_3], Original ATen: [aten.zeros_like, aten.mul, aten.reciprocal]
# Source node to ATen node mapping:
# s_2 => mul_11
# s_3 => mul_17
# tmp_2 => full_default_12
# truediv_10 => mul_15, reciprocal_10
# truediv_11 => mul_16, reciprocal_11
# truediv_8 => mul_13, reciprocal_8
# truediv_9 => mul_14, reciprocal_9
# Graph fragment:
# %full_default_12 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_11 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %select_scatter_default_15), kwargs = {})
# %reciprocal_8 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_52,), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_8, 1), kwargs = {})
# %select_scatter_default_16 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default_12, %mul_13, 0, 0), kwargs = {})
# %reciprocal_9 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_55,), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_9, 1), kwargs = {})
# %select_scatter_default_17 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_16, %mul_14, 0, 1), kwargs = {})
# %reciprocal_10 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_59,), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_10, 1), kwargs = {})
# %select_scatter_default_18 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_17, %mul_15, 0, 2), kwargs = {})
# %reciprocal_11 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_63,), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_11, 1), kwargs = {})
# %select_scatter_default_19 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_18, %mul_16, 0, 3), kwargs = {})
# %mul_17 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_11, %select_scatter_default_19), kwargs = {})
triton_poi_fused_mul_reciprocal_zeros_like_4 = async_compile.triton('triton_poi_fused_mul_reciprocal_zeros_like_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reciprocal_zeros_like_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reciprocal_zeros_like_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = x1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp7 / tmp6
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tl.full([1], 2, tl.int32)
tmp12 = tmp3 == tmp11
tmp14 = tmp7 / tmp13
tmp15 = tmp14 * tmp9
tmp16 = tmp3 == tmp7
tmp18 = tmp7 / tmp17
tmp19 = tmp18 * tmp9
tmp20 = tl.full([1], 0, tl.int32)
tmp21 = tmp3 == tmp20
tmp23 = tmp7 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = 0.0
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp16, tmp19, tmp26)
tmp28 = tl.where(tmp12, tmp15, tmp27)
tmp29 = tl.where(tmp5, tmp10, tmp28)
tmp30 = tmp2 * tmp29
tl.store(in_out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/q3/cq33kucvsebsa2qsd7xcpkdj7emgzoqz2bs5gnwrfotmnw7rqwxh.py
# Topologically Sorted Source Nodes: [tmp_4, s_4, truediv_16, truediv_17, truediv_18, truediv_19, s_5], Original ATen: [aten.zeros_like, aten.mul, aten.reciprocal]
# Source node to ATen node mapping:
# s_4 => mul_23
# s_5 => mul_29
# tmp_4 => full_default_14
# truediv_16 => mul_25, reciprocal_16
# truediv_17 => mul_26, reciprocal_17
# truediv_18 => mul_27, reciprocal_18
# truediv_19 => mul_28, reciprocal_19
# Graph fragment:
# %full_default_14 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_23 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_17, %select_scatter_default_23), kwargs = {})
# %reciprocal_16 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_82,), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_16, 1), kwargs = {})
# %select_scatter_default_24 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default_14, %mul_25, 0, 0), kwargs = {})
# %reciprocal_17 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_85,), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_17, 1), kwargs = {})
# %select_scatter_default_25 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_24, %mul_26, 0, 1), kwargs = {})
# %reciprocal_18 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_89,), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_18, 1), kwargs = {})
# %select_scatter_default_26 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_25, %mul_27, 0, 2), kwargs = {})
# %reciprocal_19 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_93,), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_19, 1), kwargs = {})
# %select_scatter_default_27 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_26, %mul_28, 0, 3), kwargs = {})
# %mul_29 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_23, %select_scatter_default_27), kwargs = {})
triton_poi_fused_mul_reciprocal_zeros_like_5 = async_compile.triton('triton_poi_fused_mul_reciprocal_zeros_like_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reciprocal_zeros_like_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reciprocal_zeros_like_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = x1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp7 / tmp6
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tl.full([1], 2, tl.int32)
tmp12 = tmp3 == tmp11
tmp14 = tmp7 / tmp13
tmp15 = tmp14 * tmp9
tmp16 = tmp3 == tmp7
tmp18 = tmp7 / tmp17
tmp19 = tmp18 * tmp9
tmp20 = tl.full([1], 0, tl.int32)
tmp21 = tmp3 == tmp20
tmp23 = tmp7 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = 0.0
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp16, tmp19, tmp26)
tmp28 = tl.where(tmp12, tmp15, tmp27)
tmp29 = tl.where(tmp5, tmp10, tmp28)
tmp30 = tmp2 * tmp29
tl.store(in_out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5g/c5gkzcvu5ggugvgnsq7r5ucgtgvwtcond4j7qnvft2uiaelgzql5.py
# Topologically Sorted Source Nodes: [tmp_9, truediv_36, truediv_37, truediv_38, truediv_39, s_10], Original ATen: [aten.zeros_like, aten.reciprocal, aten.mul]
# Source node to ATen node mapping:
# s_10 => mul_59
# tmp_9 => full_default_19
# truediv_36 => mul_55, reciprocal_36
# truediv_37 => mul_56, reciprocal_37
# truediv_38 => mul_57, reciprocal_38
# truediv_39 => mul_58, reciprocal_39
# Graph fragment:
# %full_default_19 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %reciprocal_36 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_157,), kwargs = {})
# %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_36, 1), kwargs = {})
# %select_scatter_default_44 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default_19, %mul_55, 0, 0), kwargs = {})
# %reciprocal_37 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_160,), kwargs = {})
# %mul_56 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_37, 1), kwargs = {})
# %select_scatter_default_45 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_44, %mul_56, 0, 1), kwargs = {})
# %reciprocal_38 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_164,), kwargs = {})
# %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_38, 1), kwargs = {})
# %select_scatter_default_46 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_45, %mul_57, 0, 2), kwargs = {})
# %reciprocal_39 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%select_168,), kwargs = {})
# %mul_58 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_39, 1), kwargs = {})
# %select_scatter_default_47 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_46, %mul_58, 0, 3), kwargs = {})
# %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_53, %select_scatter_default_47), kwargs = {})
triton_poi_fused_mul_reciprocal_zeros_like_6 = async_compile.triton('triton_poi_fused_mul_reciprocal_zeros_like_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reciprocal_zeros_like_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reciprocal_zeros_like_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x0 = xindex % 4
x2 = (xindex // 16) % 4
x4 = xindex
tmp3 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (64 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (68 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr0 + (72 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (76 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr0 + (128 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr0 + (132 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp67 = tl.load(in_ptr0 + (136 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr0 + (140 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp77 = tl.load(in_ptr0 + (192 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp87 = tl.load(in_ptr0 + (196 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr0 + (200 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp93 = tl.load(in_ptr0 + (204 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp99 = tl.load(in_ptr0 + (x4), xmask)
tmp0 = x3
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tl.full([1], 2, tl.int32)
tmp7 = tmp1 == tmp6
tmp8 = tl.full([1], 1, tl.int32)
tmp9 = tmp1 == tmp8
tmp10 = tmp1 == tmp1
tmp11 = 1.0
tmp12 = 0.0
tmp13 = tl.where(tmp10, tmp11, tmp12)
tmp14 = tl.where(tmp9, tmp11, tmp13)
tmp15 = tl.where(tmp7, tmp11, tmp14)
tmp16 = tl.where(tmp5, tmp11, tmp15)
tmp17 = tmp3 * tmp16
tmp19 = tmp18 * tmp16
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp16
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp16
tmp26 = tmp23 + tmp25
tmp27 = tmp8 / tmp26
tmp28 = tmp27 * tmp11
tmp29 = tl.where(tmp2, tmp28, tmp12)
tmp30 = tmp0 == tmp8
tmp32 = tmp8 == tmp4
tmp33 = tmp8 == tmp6
tmp34 = tmp8 == tmp8
tmp35 = tmp8 == tmp1
tmp36 = tl.where(tmp35, tmp11, tmp12)
tmp37 = tl.where(tmp34, tmp11, tmp36)
tmp38 = tl.where(tmp33, tmp11, tmp37)
tmp39 = tl.where(tmp32, tmp11, tmp38)
tmp40 = tmp31 * tmp39
tmp42 = tmp41 * tmp39
tmp43 = tmp40 + tmp42
tmp45 = tmp44 * tmp39
tmp46 = tmp43 + tmp45
tmp48 = tmp47 * tmp39
tmp49 = tmp46 + tmp48
tmp50 = tmp8 / tmp49
tmp51 = tmp50 * tmp11
tmp52 = tl.where(tmp30, tmp51, tmp29)
tmp53 = tmp0 == tmp6
tmp55 = tmp6 == tmp4
tmp56 = tmp6 == tmp6
tmp57 = tmp6 == tmp8
tmp58 = tmp6 == tmp1
tmp59 = tl.where(tmp58, tmp11, tmp12)
tmp60 = tl.where(tmp57, tmp11, tmp59)
tmp61 = tl.where(tmp56, tmp11, tmp60)
tmp62 = tl.where(tmp55, tmp11, tmp61)
tmp63 = tmp54 * tmp62
tmp65 = tmp64 * tmp62
tmp66 = tmp63 + tmp65
tmp68 = tmp67 * tmp62
tmp69 = tmp66 + tmp68
tmp71 = tmp70 * tmp62
tmp72 = tmp69 + tmp71
tmp73 = tmp8 / tmp72
tmp74 = tmp73 * tmp11
tmp75 = tl.where(tmp53, tmp74, tmp52)
tmp76 = tmp0 == tmp4
tmp78 = tmp4 == tmp4
tmp79 = tmp4 == tmp6
tmp80 = tmp4 == tmp8
tmp81 = tmp4 == tmp1
tmp82 = tl.where(tmp81, tmp11, tmp12)
tmp83 = tl.where(tmp80, tmp11, tmp82)
tmp84 = tl.where(tmp79, tmp11, tmp83)
tmp85 = tl.where(tmp78, tmp11, tmp84)
tmp86 = tmp77 * tmp85
tmp88 = tmp87 * tmp85
tmp89 = tmp86 + tmp88
tmp91 = tmp90 * tmp85
tmp92 = tmp89 + tmp91
tmp94 = tmp93 * tmp85
tmp95 = tmp92 + tmp94
tmp96 = tmp8 / tmp95
tmp97 = tmp96 * tmp11
tmp98 = tl.where(tmp76, tmp97, tmp75)
tmp100 = tmp99 * tmp98
tl.store(in_out_ptr0 + (x4), tmp100, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
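        # The 10 Sinkhorn iterations are fully unrolled below: the
        # triton_poi_fused_mul_sum_* kernels compute the masked row/column
        # sums and the triton_poi_fused_mul_reciprocal_zeros_like_* kernels
        # rescale s by their reciprocals, reusing buffers in place.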
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sum_2], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tmp, s, truediv, truediv_1, truediv_2, truediv_3, s_1], Original ATen: [aten.zeros_like, aten.add, aten.reciprocal, aten.mul]
triton_poi_fused_add_mul_reciprocal_zeros_like_1.run(arg0_1, buf0, buf1, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = buf0; del buf0 # reuse
buf3 = buf2; del buf2 # reuse
buf4 = buf3; del buf3 # reuse
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [tmp_1, truediv_4, truediv_5, truediv_6, truediv_7], Original ATen: [aten.zeros_like, aten.reciprocal, aten.mul]
triton_poi_fused_mul_reciprocal_zeros_like_2.run(buf5, buf1, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_4, sum_6], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf1, buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [tmp_2, s_2, truediv_8, truediv_9, truediv_10, truediv_11, s_3], Original ATen: [aten.zeros_like, aten.mul, aten.reciprocal]
triton_poi_fused_mul_reciprocal_zeros_like_4.run(buf7, buf5, buf6, 256, grid=grid(256), stream=stream0)
buf8 = buf6; del buf6 # reuse
buf9 = buf8; del buf8 # reuse
buf10 = buf9; del buf9 # reuse
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [tmp_3, truediv_12, truediv_13, truediv_14, truediv_15], Original ATen: [aten.zeros_like, aten.reciprocal, aten.mul]
triton_poi_fused_mul_reciprocal_zeros_like_2.run(buf11, buf7, 256, grid=grid(256), stream=stream0)
buf12 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [mul_8, sum_10], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf7, buf11, buf12, 256, grid=grid(256), stream=stream0)
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [tmp_4, s_4, truediv_16, truediv_17, truediv_18, truediv_19, s_5], Original ATen: [aten.zeros_like, aten.mul, aten.reciprocal]
triton_poi_fused_mul_reciprocal_zeros_like_5.run(buf13, buf7, buf12, 256, grid=grid(256), stream=stream0)
buf14 = buf7; del buf7 # reuse
buf15 = buf14; del buf14 # reuse
buf16 = buf15; del buf15 # reuse
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [tmp_5, truediv_20, truediv_21, truediv_22, truediv_23], Original ATen: [aten.zeros_like, aten.reciprocal, aten.mul]
triton_poi_fused_mul_reciprocal_zeros_like_2.run(buf17, buf13, 256, grid=grid(256), stream=stream0)
buf18 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [mul_12, sum_14], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf13, buf17, buf18, 256, grid=grid(256), stream=stream0)
buf19 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [tmp_6, s_6, truediv_24, truediv_25, truediv_26, truediv_27, s_7], Original ATen: [aten.zeros_like, aten.mul, aten.reciprocal]
triton_poi_fused_mul_reciprocal_zeros_like_4.run(buf19, buf17, buf18, 256, grid=grid(256), stream=stream0)
buf20 = buf18; del buf18 # reuse
buf21 = buf20; del buf20 # reuse
buf22 = buf21; del buf21 # reuse
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [tmp_7, truediv_28, truediv_29, truediv_30, truediv_31], Original ATen: [aten.zeros_like, aten.reciprocal, aten.mul]
triton_poi_fused_mul_reciprocal_zeros_like_2.run(buf23, buf19, 256, grid=grid(256), stream=stream0)
buf24 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [mul_16, sum_18], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf19, buf23, buf24, 256, grid=grid(256), stream=stream0)
buf25 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [tmp_8, s_8, truediv_32, truediv_33, truediv_34, truediv_35, s_9], Original ATen: [aten.zeros_like, aten.mul, aten.reciprocal]
triton_poi_fused_mul_reciprocal_zeros_like_4.run(buf25, buf23, buf24, 256, grid=grid(256), stream=stream0)
del buf23
buf26 = buf24; del buf24 # reuse
buf27 = buf26; del buf26 # reuse
buf28 = buf27; del buf27 # reuse
buf29 = buf28; del buf28 # reuse
buf30 = buf29; del buf29 # reuse
# Topologically Sorted Source Nodes: [tmp_9, truediv_36, truediv_37, truediv_38, truediv_39, s_10], Original ATen: [aten.zeros_like, aten.reciprocal, aten.mul]
triton_poi_fused_mul_reciprocal_zeros_like_6.run(buf30, buf25, 256, grid=grid(256), stream=stream0)
del buf25
return (buf30, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
from sklearn import *
class Sinkhorn(nn.Module):
"""
    BiStochastic layer: turns the input matrix into a bi-stochastic matrix
    (every row sum and column sum close to 1) via Sinkhorn normalization.
    Parameters: max_iter -- maximum number of Sinkhorn iterations
                epsilon -- small constant added for numerical stability
    Input: input matrix s
    Output: bi-stochastic matrix s
"""
def __init__(self, max_iter=10, epsilon=0.0001):
super(Sinkhorn, self).__init__()
self.max_iter = max_iter
self.epsilon = epsilon
def forward(self, s, nrows=None, ncols=None, exp=False, exp_alpha=20,
dummy_row=False, dtype=torch.float32):
batch_size = s.shape[0]
if dummy_row:
dummy_shape = list(s.shape)
dummy_shape[1] = s.shape[2] - s.shape[1]
            s = torch.cat((s, torch.full(dummy_shape, 0.0, device=s.device, dtype=s.dtype)), dim=1)
new_nrows = ncols
for b in range(batch_size):
s[b, nrows[b]:new_nrows[b], :ncols[b]] = self.epsilon
nrows = new_nrows
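        # Block masks: ones over the valid (non-padded) rows/columns of each
        # batch element, used to restrict the normalising sums below.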
row_norm_ones = torch.zeros(batch_size, s.shape[1], s.shape[1],
device=s.device)
col_norm_ones = torch.zeros(batch_size, s.shape[2], s.shape[2],
device=s.device)
for b in range(batch_size):
row_slice = slice(0, nrows[b] if nrows is not None else s.shape[2])
col_slice = slice(0, ncols[b] if ncols is not None else s.shape[1])
row_norm_ones[b, row_slice, row_slice] = 1
col_norm_ones[b, col_slice, col_slice] = 1
if len(s.shape) == 4:
row_norm_ones = row_norm_ones.unsqueeze(-1)
col_norm_ones = col_norm_ones.unsqueeze(-1)
s += self.epsilon
for i in range(self.max_iter):
if exp:
s = torch.exp(exp_alpha * s)
if i % 2 == 1:
                sums = torch.sum(torch.mul(s.unsqueeze(3), col_norm_ones.unsqueeze(1)), dim=2)
else:
                sums = torch.sum(torch.mul(row_norm_ones.unsqueeze(3), s.unsqueeze(1)), dim=2)
tmp = torch.zeros_like(s)
for b in range(batch_size):
                row_slice = slice(0, nrows[b] if nrows is not None else s.shape[2])
                col_slice = slice(0, ncols[b] if ncols is not None else s.shape[1])
                tmp[b, row_slice, col_slice] = 1 / sums[b, row_slice, col_slice]
s = s * tmp
if dummy_row and dummy_shape[1] > 0:
s = s[:, :-dummy_shape[1]]
return s
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
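

# Minimal usage sketch (added; not part of the original file, names are
# illustrative): after the alternating normalisations, every row sum and
# column sum of the output should be close to 1.
if __name__ == '__main__':
    sinkhorn = Sinkhorn(max_iter=10, epsilon=0.0001)
    s_out = sinkhorn(torch.rand(4, 4, 4))
    print(s_out.sum(dim=2))  # row sums, ~1 (the final iteration divides by row sums)
    print(s_out.sum(dim=1))  # column sums, ~1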
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
from sklearn import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 64
x0 = xindex % 16
x3 = xindex
tmp15 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 2, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp0 == tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tl.where(tmp6, tmp9, tmp11)
tmp13 = tl.where(tmp4, tmp9, tmp12)
tmp14 = tl.where(tmp2, tmp9, tmp13)
tmp16 = 0.0001
tmp17 = tmp15 + tmp16
tmp18 = tmp14 * tmp17
tmp20 = tmp19 + tmp16
tmp21 = tmp14 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp23 + tmp16
tmp25 = tmp14 * tmp24
tmp26 = tmp22 + tmp25
tmp28 = tmp27 + tmp16
tmp29 = tmp14 * tmp28
tmp30 = tmp26 + tmp29
tl.store(out_ptr0 + x3, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_mul_reciprocal_zeros_like_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = 0.0001
tmp2 = tmp0 + tmp1
tmp3 = x1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp7 / tmp6
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tl.full([1], 2, tl.int32)
tmp12 = tmp3 == tmp11
tmp14 = tmp7 / tmp13
tmp15 = tmp14 * tmp9
tmp16 = tmp3 == tmp7
tmp18 = tmp7 / tmp17
tmp19 = tmp18 * tmp9
tmp20 = tl.full([1], 0, tl.int32)
tmp21 = tmp3 == tmp20
tmp23 = tmp7 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = 0.0
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp16, tmp19, tmp26)
tmp28 = tl.where(tmp12, tmp15, tmp27)
tmp29 = tl.where(tmp5, tmp10, tmp28)
tmp30 = tmp2 * tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
tl.store(out_ptr2 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_reciprocal_zeros_like_2(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x0 = xindex % 4
x2 = xindex // 16 % 4
x4 = xindex
tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp41 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp44 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp47 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp54 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp64 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp67 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp70 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp77 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp87 = tl.load(in_ptr0 + (196 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp90 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp93 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp0 = x3
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tl.full([1], 2, tl.int32)
tmp7 = tmp1 == tmp6
tmp8 = tl.full([1], 1, tl.int32)
tmp9 = tmp1 == tmp8
tmp10 = tmp1 == tmp1
tmp11 = 1.0
tmp12 = 0.0
tmp13 = tl.where(tmp10, tmp11, tmp12)
tmp14 = tl.where(tmp9, tmp11, tmp13)
tmp15 = tl.where(tmp7, tmp11, tmp14)
tmp16 = tl.where(tmp5, tmp11, tmp15)
tmp17 = tmp3 * tmp16
tmp19 = tmp18 * tmp16
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp16
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp16
tmp26 = tmp23 + tmp25
tmp27 = tmp8 / tmp26
tmp28 = tmp27 * tmp11
tmp29 = tl.where(tmp2, tmp28, tmp12)
tmp30 = tmp0 == tmp8
tmp32 = tmp8 == tmp4
tmp33 = tmp8 == tmp6
tmp34 = tmp8 == tmp8
tmp35 = tmp8 == tmp1
tmp36 = tl.where(tmp35, tmp11, tmp12)
tmp37 = tl.where(tmp34, tmp11, tmp36)
tmp38 = tl.where(tmp33, tmp11, tmp37)
tmp39 = tl.where(tmp32, tmp11, tmp38)
tmp40 = tmp31 * tmp39
tmp42 = tmp41 * tmp39
tmp43 = tmp40 + tmp42
tmp45 = tmp44 * tmp39
tmp46 = tmp43 + tmp45
tmp48 = tmp47 * tmp39
tmp49 = tmp46 + tmp48
tmp50 = tmp8 / tmp49
tmp51 = tmp50 * tmp11
tmp52 = tl.where(tmp30, tmp51, tmp29)
tmp53 = tmp0 == tmp6
tmp55 = tmp6 == tmp4
tmp56 = tmp6 == tmp6
tmp57 = tmp6 == tmp8
tmp58 = tmp6 == tmp1
tmp59 = tl.where(tmp58, tmp11, tmp12)
tmp60 = tl.where(tmp57, tmp11, tmp59)
tmp61 = tl.where(tmp56, tmp11, tmp60)
tmp62 = tl.where(tmp55, tmp11, tmp61)
tmp63 = tmp54 * tmp62
tmp65 = tmp64 * tmp62
tmp66 = tmp63 + tmp65
tmp68 = tmp67 * tmp62
tmp69 = tmp66 + tmp68
tmp71 = tmp70 * tmp62
tmp72 = tmp69 + tmp71
tmp73 = tmp8 / tmp72
tmp74 = tmp73 * tmp11
tmp75 = tl.where(tmp53, tmp74, tmp52)
tmp76 = tmp0 == tmp4
tmp78 = tmp4 == tmp4
tmp79 = tmp4 == tmp6
tmp80 = tmp4 == tmp8
tmp81 = tmp4 == tmp1
tmp82 = tl.where(tmp81, tmp11, tmp12)
tmp83 = tl.where(tmp80, tmp11, tmp82)
tmp84 = tl.where(tmp79, tmp11, tmp83)
tmp85 = tl.where(tmp78, tmp11, tmp84)
tmp86 = tmp77 * tmp85
tmp88 = tmp87 * tmp85
tmp89 = tmp86 + tmp88
tmp91 = tmp90 * tmp85
tmp92 = tmp89 + tmp91
tmp94 = tmp93 * tmp85
tmp95 = tmp92 + tmp94
tmp96 = tmp8 / tmp95
tmp97 = tmp96 * tmp11
tmp98 = tl.where(tmp76, tmp97, tmp75)
tl.store(in_out_ptr0 + x4, tmp98, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 64
x0 = xindex % 16
x3 = xindex
tmp15 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 2, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp0 == tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tl.where(tmp6, tmp9, tmp11)
tmp13 = tl.where(tmp4, tmp9, tmp12)
tmp14 = tl.where(tmp2, tmp9, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 * tmp17
tmp21 = tmp19 * tmp20
tmp22 = tmp14 * tmp21
tmp23 = tmp18 + tmp22
tmp26 = tmp24 * tmp25
tmp27 = tmp14 * tmp26
tmp28 = tmp23 + tmp27
tmp31 = tmp29 * tmp30
tmp32 = tmp14 * tmp31
tmp33 = tmp28 + tmp32
tl.store(out_ptr0 + x3, tmp33, xmask)
@triton.jit
def triton_poi_fused_mul_reciprocal_zeros_like_4(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = x1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp7 / tmp6
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tl.full([1], 2, tl.int32)
tmp12 = tmp3 == tmp11
tmp14 = tmp7 / tmp13
tmp15 = tmp14 * tmp9
tmp16 = tmp3 == tmp7
tmp18 = tmp7 / tmp17
tmp19 = tmp18 * tmp9
tmp20 = tl.full([1], 0, tl.int32)
tmp21 = tmp3 == tmp20
tmp23 = tmp7 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = 0.0
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp16, tmp19, tmp26)
tmp28 = tl.where(tmp12, tmp15, tmp27)
tmp29 = tl.where(tmp5, tmp10, tmp28)
tmp30 = tmp2 * tmp29
tl.store(in_out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_mul_reciprocal_zeros_like_5(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = x1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp7 / tmp6
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tl.full([1], 2, tl.int32)
tmp12 = tmp3 == tmp11
tmp14 = tmp7 / tmp13
tmp15 = tmp14 * tmp9
tmp16 = tmp3 == tmp7
tmp18 = tmp7 / tmp17
tmp19 = tmp18 * tmp9
tmp20 = tl.full([1], 0, tl.int32)
tmp21 = tmp3 == tmp20
tmp23 = tmp7 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = 0.0
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp16, tmp19, tmp26)
tmp28 = tl.where(tmp12, tmp15, tmp27)
tmp29 = tl.where(tmp5, tmp10, tmp28)
tmp30 = tmp2 * tmp29
tl.store(in_out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_mul_reciprocal_zeros_like_6(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x0 = xindex % 4
x2 = xindex // 16 % 4
x4 = xindex
tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp41 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp44 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp47 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp54 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp64 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp67 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp70 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp77 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp87 = tl.load(in_ptr0 + (196 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp90 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp93 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp99 = tl.load(in_ptr0 + x4, xmask)
tmp0 = x3
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tl.full([1], 2, tl.int32)
tmp7 = tmp1 == tmp6
tmp8 = tl.full([1], 1, tl.int32)
tmp9 = tmp1 == tmp8
tmp10 = tmp1 == tmp1
tmp11 = 1.0
tmp12 = 0.0
tmp13 = tl.where(tmp10, tmp11, tmp12)
tmp14 = tl.where(tmp9, tmp11, tmp13)
tmp15 = tl.where(tmp7, tmp11, tmp14)
tmp16 = tl.where(tmp5, tmp11, tmp15)
tmp17 = tmp3 * tmp16
tmp19 = tmp18 * tmp16
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp16
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp16
tmp26 = tmp23 + tmp25
tmp27 = tmp8 / tmp26
tmp28 = tmp27 * tmp11
tmp29 = tl.where(tmp2, tmp28, tmp12)
tmp30 = tmp0 == tmp8
tmp32 = tmp8 == tmp4
tmp33 = tmp8 == tmp6
tmp34 = tmp8 == tmp8
tmp35 = tmp8 == tmp1
tmp36 = tl.where(tmp35, tmp11, tmp12)
tmp37 = tl.where(tmp34, tmp11, tmp36)
tmp38 = tl.where(tmp33, tmp11, tmp37)
tmp39 = tl.where(tmp32, tmp11, tmp38)
tmp40 = tmp31 * tmp39
tmp42 = tmp41 * tmp39
tmp43 = tmp40 + tmp42
tmp45 = tmp44 * tmp39
tmp46 = tmp43 + tmp45
tmp48 = tmp47 * tmp39
tmp49 = tmp46 + tmp48
tmp50 = tmp8 / tmp49
tmp51 = tmp50 * tmp11
tmp52 = tl.where(tmp30, tmp51, tmp29)
tmp53 = tmp0 == tmp6
tmp55 = tmp6 == tmp4
tmp56 = tmp6 == tmp6
tmp57 = tmp6 == tmp8
tmp58 = tmp6 == tmp1
tmp59 = tl.where(tmp58, tmp11, tmp12)
tmp60 = tl.where(tmp57, tmp11, tmp59)
tmp61 = tl.where(tmp56, tmp11, tmp60)
tmp62 = tl.where(tmp55, tmp11, tmp61)
tmp63 = tmp54 * tmp62
tmp65 = tmp64 * tmp62
tmp66 = tmp63 + tmp65
tmp68 = tmp67 * tmp62
tmp69 = tmp66 + tmp68
tmp71 = tmp70 * tmp62
tmp72 = tmp69 + tmp71
tmp73 = tmp8 / tmp72
tmp74 = tmp73 * tmp11
tmp75 = tl.where(tmp53, tmp74, tmp52)
tmp76 = tmp0 == tmp4
tmp78 = tmp4 == tmp4
tmp79 = tmp4 == tmp6
tmp80 = tmp4 == tmp8
tmp81 = tmp4 == tmp1
tmp82 = tl.where(tmp81, tmp11, tmp12)
tmp83 = tl.where(tmp80, tmp11, tmp82)
tmp84 = tl.where(tmp79, tmp11, tmp83)
tmp85 = tl.where(tmp78, tmp11, tmp84)
tmp86 = tmp77 * tmp85
tmp88 = tmp87 * tmp85
tmp89 = tmp86 + tmp88
tmp91 = tmp90 * tmp85
tmp92 = tmp89 + tmp91
tmp94 = tmp93 * tmp85
tmp95 = tmp92 + tmp94
tmp96 = tmp8 / tmp95
tmp97 = tmp96 * tmp11
tmp98 = tl.where(tmp76, tmp97, tmp75)
tmp100 = tmp99 * tmp98
tl.store(in_out_ptr0 + x4, tmp100, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
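        # Same unrolled 10-iteration Sinkhorn schedule as the wrapper above,
        # but with explicit XBLOCK/num_warps/num_stages launch parameters.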
triton_poi_fused_mul_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_reciprocal_zeros_like_1[grid(256)](arg0_1,
buf0, buf1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = buf0
del buf0
buf3 = buf2
del buf2
buf4 = buf3
del buf3
buf5 = buf4
del buf4
triton_poi_fused_mul_reciprocal_zeros_like_2[grid(256)](buf5, buf1,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_3[grid(256)](buf1, buf5, buf6, 256, XBLOCK
=256, num_warps=4, num_stages=1)
buf7 = buf1
del buf1
triton_poi_fused_mul_reciprocal_zeros_like_4[grid(256)](buf7, buf5,
buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = buf6
del buf6
buf9 = buf8
del buf8
buf10 = buf9
del buf9
buf11 = buf10
del buf10
triton_poi_fused_mul_reciprocal_zeros_like_2[grid(256)](buf11, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = buf5
del buf5
triton_poi_fused_mul_sum_3[grid(256)](buf7, buf11, buf12, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused_mul_reciprocal_zeros_like_5[grid(256)](buf13, buf7,
buf12, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf14 = buf7
del buf7
buf15 = buf14
del buf14
buf16 = buf15
del buf15
buf17 = buf16
del buf16
triton_poi_fused_mul_reciprocal_zeros_like_2[grid(256)](buf17,
buf13, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf18 = buf12
del buf12
triton_poi_fused_mul_sum_3[grid(256)](buf13, buf17, buf18, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf19 = buf13
del buf13
triton_poi_fused_mul_reciprocal_zeros_like_4[grid(256)](buf19,
buf17, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf20 = buf18
del buf18
buf21 = buf20
del buf20
buf22 = buf21
del buf21
buf23 = buf22
del buf22
triton_poi_fused_mul_reciprocal_zeros_like_2[grid(256)](buf23,
buf19, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf24 = buf17
del buf17
triton_poi_fused_mul_sum_3[grid(256)](buf19, buf23, buf24, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf25 = buf19
del buf19
triton_poi_fused_mul_reciprocal_zeros_like_4[grid(256)](buf25,
buf23, buf24, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf23
buf26 = buf24
del buf24
buf27 = buf26
del buf26
buf28 = buf27
del buf27
buf29 = buf28
del buf28
buf30 = buf29
del buf29
triton_poi_fused_mul_reciprocal_zeros_like_6[grid(256)](buf30,
buf25, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf25
return buf30,
class SinkhornNew(nn.Module):
"""
    BiStochastic layer: turns the input matrix into a bi-stochastic matrix
    (every row sum and column sum close to 1) via Sinkhorn normalization.
    Parameters: max_iter -- maximum number of Sinkhorn iterations
                epsilon -- small constant added for numerical stability
    Input: input matrix s
    Output: bi-stochastic matrix s
"""
def __init__(self, max_iter=10, epsilon=0.0001):
super(SinkhornNew, self).__init__()
self.max_iter = max_iter
self.epsilon = epsilon
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
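# Note (added): SinkhornNew is the Inductor-compiled counterpart of the eager
# Sinkhorn module above, specialised to the (4, 4, 4, 4) CUDA input from
# get_inputs(); for that shape it is expected to match Sinkhorn()(x).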
| CityU-AIM-Group/SIGMA | Sinkhorn | false | 17,461 | [
"MIT"
] | 5 | 19f89777db8d42f750a9b87756d3326c7efd18f5 | https://github.com/CityU-AIM-Group/SIGMA/tree/19f89777db8d42f750a9b87756d3326c7efd18f5 |
TotalVariations | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/fv/cfveoc5p46ebuhfrd7emhb63v4lcoyswrunqlnp23yqn7pbiocuh.py
# Topologically Sorted Source Nodes: [sub, abs_1, sum_1, sub_1, abs_2, sum_2, add], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => abs_2
# add => add
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_6), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_8, %slice_11), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_abs_add_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 12
r1 = (rindex // 12)
r2 = rindex % 48
r3 = (rindex // 48)
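    # Indexing note: the first load pair walks img1[:, :, :-1] vs img1[:, :, 1:]
    # (blocks of 12 = 3*4 inside each 4x4 outer index, dim-2 stride 4), and the
    # second pair walks img1[:, :-1, :] vs img1[:, 1:, :] (blocks of 48 = 3*4*4
    # per batch element, dim-1 stride 16); each reduction covers 192 = 4*4*3*4
    # elements.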
tmp0 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (r2 + (64*r3)), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (16 + r2 + (64*r3)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, abs_1, sum_1, sub_1, abs_2, sum_2, add], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn.modules.loss import _Loss
class TotalVariations(_Loss):
def forward(self, img1):
return torch.sum(torch.abs(img1[:, :, :-1] - img1[:, :, 1:])
) + torch.sum(torch.abs(img1[:, :-1, :] - img1[:, 1:, :]))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
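

# Sanity sketch (added; illustrative, not in the original source): the loss is
# the sum of absolute first differences along dims 2 and 1 of the input.
if __name__ == '__main__':
    tv = TotalVariations()
    img = torch.rand(4, 4, 4, 4)
    manual = (img[:, :, :-1] - img[:, :, 1:]).abs().sum() + \
        (img[:, :-1, :] - img[:, 1:, :]).abs().sum()
    assert torch.allclose(tv(img), manual)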
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 48
r3 = rindex // 48
tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (r2 + 64 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (16 + r2 + 64 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0[grid(1)](buf2, arg0_1, 1, 192,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TotalVariationsNew(_Loss):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
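# Note (added): for the (4, 4, 4, 4) CUDA input above, TotalVariationsNew runs
# both difference/abs/sum reductions in the single fused Triton kernel and is
# expected to match the eager TotalVariations module numerically.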
| HMS-CardiacMR/MyoMapNet-Myocardial-Parametric-Mapping | TotalVariations | false | 17,462 | [
"MIT"
] | 4 | 1e2dee8d6d1f97722eba91618462537faf9efba7 | https://github.com/HMS-CardiacMR/MyoMapNet-Myocardial-Parametric-Mapping/tree/1e2dee8d6d1f97722eba91618462537faf9efba7 |
Nloss_GD | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4j/c4jweqfmta44vuuovtwajznuemzrztxffx6uhzrvfqq3cgstucrq.py
# Topologically Sorted Source Nodes: [Beta, log, sum_1, log_det_term, sub, dist, mul_1, mul_2, exponent, add, norm_term, log_p, neg, sum_3, E], Original ATen: [aten.pow, aten.log, aten.sum, aten.mul, aten.sub, aten.add, aten.neg, aten.div]
# Source node to ATen node mapping:
# Beta => pow_1
# E => div
# add => add
# dist => pow_2
# exponent => sum_2
# log => log
# log_det_term => mul
# log_p => add_1
# mul_1 => mul_3
# mul_2 => mul_4
# neg => neg
# norm_term => full_default
# sub => sub
# sum_1 => sum_1
# sum_3 => sum_3
# Graph fragment:
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%pow_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%log, [1]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, -0.5), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %pow_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %sum_2), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -3.6757541328186907), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %full_default), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add_1,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%neg,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
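# Note (added): the full_default constant -3.6757541328186907 is the Gaussian
# normalisation term -0.5 * log(2 * pi) * dim evaluated at dim = 4:
#   >>> -0.5 * np.log(2 * np.pi) * 4
#   -3.6757541328186907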
triton_per_fused_add_div_log_mul_neg_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mul_neg_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_neg_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_log_mul_neg_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp6 = tl.load(in_ptr2 + (r0 + (64*r1)), None)
tmp9 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None)
tmp18 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp19 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp23 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None)
tmp27 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp28 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp32 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = -0.5
tmp5 = tmp3 * tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 * tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp4
tmp15 = tmp14 * tmp14
tmp16 = tmp13 * tmp15
tmp17 = tmp8 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp21 * tmp4
tmp24 = tmp23 * tmp23
tmp25 = tmp22 * tmp24
tmp26 = tmp17 + tmp25
tmp29 = tmp27 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp30 * tmp4
tmp33 = tmp32 * tmp32
tmp34 = tmp31 * tmp33
tmp35 = tmp26 + tmp34
tmp36 = tl_math.log(tmp7)
tmp37 = tl_math.log(tmp15)
tmp38 = tmp36 + tmp37
tmp39 = tl_math.log(tmp24)
tmp40 = tmp38 + tmp39
tmp41 = tl_math.log(tmp33)
tmp42 = tmp40 + tmp41
tmp43 = 0.5
tmp44 = tmp42 * tmp43
tmp45 = tmp44 + tmp35
tmp46 = -3.6757541328186907
tmp47 = tmp45 + tmp46
tmp48 = -tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 0.25
tmp53 = tmp51 * tmp52
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp53, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [Beta, log, sum_1, log_det_term, sub, dist, mul_1, mul_2, exponent, add, norm_term, log_p, neg, sum_3, E], Original ATen: [aten.pow, aten.log, aten.sum, aten.mul, aten.sub, aten.add, aten.neg, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_log_mul_neg_pow_sub_sum_0.run(buf2, arg1_1, arg2_1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
class Nloss_GD(nn.Module):
def __init__(self, dim):
super(Nloss_GD, self).__init__()
self.dim = dim
torch.manual_seed(0)
def get_likelihoods(self, X, Y, Beta, eps=1e-06):
inv_det = Beta.prod(dim=1)
if (inv_det < eps).any():
            # cast on inv_det's own device/dtype; .type(torch.FloatTensor)
            # would move the mask to CPU and fail for CUDA inputs
            inv_det = inv_det + (inv_det < eps).to(inv_det.dtype) * eps
det = 1 / inv_det
        None  # no-op (likely a stripped debug print)
norm_term = 1 / torch.sqrt((2 * np.pi) ** self.dim * torch.abs(det))
inv_covars = Beta
dist = (Y - X).pow(2)
exponent = (-0.5 * dist * inv_covars).sum(dim=1)
pk = norm_term * exponent.exp()
return pk
def get_log_likelihoods(self, X, Y, sq_Beta, eps=1e-06):
Beta = sq_Beta ** 2
log_det_term = 0.5 * Beta.log().sum(dim=1)
norm_term = -0.5 * np.log(2 * np.pi) * self.dim
inv_covars = Beta
dist = (Y - X).pow(2)
exponent = (-0.5 * dist * inv_covars).sum(dim=1)
log_p = log_det_term + exponent + norm_term
return log_p
def forward(self, x, y, Beta):
p = self.get_log_likelihoods(x, y, Beta)
E = torch.sum(-p) / x.shape[0]
return E
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
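# Hedged usage sketch (added, not from the repo): the module is pure PyTorch,
# so a CPU smoke test suffices; sq_Beta parameterises the square root of the
# diagonal precision, so Beta = sq_Beta ** 2 stays positive inside the loss.
if __name__ == "__main__":
    x, y, sq_beta = get_inputs()
    loss = Nloss_GD(dim=4)(x, y, sq_beta)
    print(float(loss))  # scalar: mean negative log-likelihood over the batch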
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mul_neg_pow_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # result discarded: single-program grid
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # result discarded: mask is all-true
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp6 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp18 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp23 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp27 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp28 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp32 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = -0.5
tmp5 = tmp3 * tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 * tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp4
tmp15 = tmp14 * tmp14
tmp16 = tmp13 * tmp15
tmp17 = tmp8 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp21 * tmp4
tmp24 = tmp23 * tmp23
tmp25 = tmp22 * tmp24
tmp26 = tmp17 + tmp25
tmp29 = tmp27 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp30 * tmp4
tmp33 = tmp32 * tmp32
tmp34 = tmp31 * tmp33
tmp35 = tmp26 + tmp34
tmp36 = tl_math.log(tmp7)
tmp37 = tl_math.log(tmp15)
tmp38 = tmp36 + tmp37
tmp39 = tl_math.log(tmp24)
tmp40 = tmp38 + tmp39
tmp41 = tl_math.log(tmp33)
tmp42 = tmp40 + tmp41
tmp43 = 0.5
tmp44 = tmp42 * tmp43
tmp45 = tmp44 + tmp35
tmp46 = -3.6757541328186907
tmp47 = tmp45 + tmp46
tmp48 = -tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 0.25
tmp53 = tmp51 * tmp52
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_div_log_mul_neg_pow_sub_sum_0[grid(1)](buf2,
arg1_1, arg2_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class Nloss_GDNew(nn.Module):
def __init__(self, dim):
super(Nloss_GDNew, self).__init__()
self.dim = dim
torch.manual_seed(0)
def get_likelihoods(self, X, Y, Beta, eps=1e-06):
inv_det = Beta.prod(dim=1)
if (inv_det < eps).any():
            # cast on inv_det's own device/dtype; .type(torch.FloatTensor)
            # would move the mask to CPU and fail for CUDA inputs
            inv_det = inv_det + (inv_det < eps).to(inv_det.dtype) * eps
det = 1 / inv_det
        None  # no-op (likely a stripped debug print)
norm_term = 1 / torch.sqrt((2 * np.pi) ** self.dim * torch.abs(det))
inv_covars = Beta
dist = (Y - X).pow(2)
exponent = (-0.5 * dist * inv_covars).sum(dim=1)
pk = norm_term * exponent.exp()
return pk
def get_log_likelihoods(self, X, Y, sq_Beta, eps=1e-06):
Beta = sq_Beta ** 2
log_det_term = 0.5 * Beta.log().sum(dim=1)
norm_term = -0.5 * np.log(2 * np.pi) * self.dim
inv_covars = Beta
dist = (Y - X).pow(2)
exponent = (-0.5 * dist * inv_covars).sum(dim=1)
log_p = log_det_term + exponent + norm_term
return log_p
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
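# Hedged parity sketch (added): assuming a CUDA device. In the kernel above,
# arg0_1 (passed as in_ptr2) is the tensor that gets squared, i.e. the sq_Beta
# role, while arg1_1 - arg2_1 forms the residual, so a check against the eager
# path passes the tensors in that traced order:
#   m = Nloss_GDNew(dim=4)
#   b, y, x = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
#   eager = torch.sum(-m.get_log_likelihoods(x, y, b)) / x.shape[0]
#   torch.testing.assert_close(m(b, y, x), eager)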
| JavierAntoran/tiger-costume | Nloss_GD | false | 17,463 | ["MIT"] | 10 | 975661dfab2c435281f74c6be86529b16881ebcb | https://github.com/JavierAntoran/tiger-costume/tree/975661dfab2c435281f74c6be86529b16881ebcb |
LogisticRegressionBinaryClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/u7/cu7el5okpylwtytr7ulyozevinrghb53q53bmqwswo2zwf3hvhr3.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
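# Note (added): only the bias add and the sigmoid are fused here; the matmul of
# the linear layer is dispatched to extern_kernels.mm in call() below, so this
# kernel finishes sigmoid(x @ W.T + b) elementwise on the mm output.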
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class LogisticRegressionBinaryClassifier(nn.Module):
def __init__(self, input_size):
super(LogisticRegressionBinaryClassifier, self).__init__()
self.input_size = input_size
self.mapping = nn.Linear(input_size, 1)
def forward(self, x):
return torch.sigmoid(self.mapping(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
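# Hedged usage sketch (added, not from the repo): the sigmoid output pairs
# naturally with binary cross-entropy on 0/1 targets.
if __name__ == "__main__":
    model = LogisticRegressionBinaryClassifier(input_size=4)
    features = torch.rand(8, 4)                   # batch of 8 feature vectors
    labels = torch.randint(0, 2, (8, 1)).float()  # binary targets
    loss = nn.BCELoss()(model(features), labels)  # BCELoss expects probabilities
    loss.backward()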
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(64)](buf1, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class LogisticRegressionBinaryClassifierNew(nn.Module):
def __init__(self, input_size):
super(LogisticRegressionBinaryClassifierNew, self).__init__()
self.input_size = input_size
self.mapping = nn.Linear(input_size, 1)
def forward(self, input_0):
primals_1 = self.mapping.weight
primals_2 = self.mapping.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| JayWalker512/PacketGAN | LogisticRegressionBinaryClassifier | false | 17,464 | ["MIT"] | 5 | 93d4266ab9299c25ffd1f0aedf68fa4639f66572 | https://github.com/JayWalker512/PacketGAN/tree/93d4266ab9299c25ffd1f0aedf68fa4639f66572 |
TVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4a/c4akiau5kki7pvgwqf3uldfxropgk3idc4qj6xvifejn46i5na2y.py
# Topologically Sorted Source Nodes: [sub, abs_1, sum_1, sub_1, abs_2, sum_2, add, mul], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add, aten.mul]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => abs_2
# add => add
# mul => mul
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {})
triton_per_fused_abs_add_mul_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_mul_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_mul_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 12
r1 = (rindex // 12)
r2 = rindex
r3 = rindex % 3
r4 = (rindex // 3)
tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r3 + (4*r4)), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r3 + (4*r4)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tmp17 = 1.0
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp2, rmask)
tl.store(out_ptr1 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp10, rmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 4), (48, 12, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, abs_1, sum_1, sub_1, abs_2, sum_2, add, mul], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_mul_sub_sum_0.run(buf4, arg0_1, buf0, buf2, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
return (buf4, buf2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as th
import torch.utils.data
import torch
import torch.autograd
class TVLoss(th.nn.Module):
def __init__(self, strength=1.0):
super(TVLoss, self).__init__()
self.strength = strength
def forward(self, input):
self.x_diff = input[:, :, 1:, :] - input[:, :, :-1, :]
self.y_diff = input[:, :, :, 1:] - input[:, :, :, :-1]
self.loss = (th.sum(th.abs(self.x_diff)) + th.sum(th.abs(self.y_diff))
) * self.strength
return input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
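# Sketch (added, not from the repo): the same anisotropic total variation as a
# free function; assumes an NCHW tensor, matching the slicing in forward().
def total_variation(img, strength=1.0):
    x_diff = img[:, :, 1:, :] - img[:, :, :-1, :]  # differences along height
    y_diff = img[:, :, :, 1:] - img[:, :, :, :-1]  # differences along width
    return strength * (x_diff.abs().sum() + y_diff.abs().sum())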
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch as th
import torch.utils.data
import torch
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_mul_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # result discarded: single-program grid
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # result discarded: mask is all-true
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex
r3 = rindex % 3
r4 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r3 + 4 * r4), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r3 + 4 * r4), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tmp17 = 1.0
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp2, rmask)
tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp10, rmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None)
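# Note (added): besides the scalar loss written through in_out_ptr0, the kernel
# materialises x_diff (out_ptr0) and y_diff (out_ptr1) because the eager module
# keeps them as attributes; rnumel = 192 < RBLOCK = 256, so rmask guards every
# load, store and partial sum.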
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 4), (48, 12, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
buf4 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_abs_add_mul_sub_sum_0[grid(1)](buf4, arg0_1, buf0,
buf2, 1, 192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf4, buf2, buf0
class TVLossNew(th.nn.Module):
def __init__(self, strength=1.0):
super(TVLossNew, self).__init__()
self.strength = strength
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| JCBrouwer/maua | TVLoss | false | 17,465 | ["BSD-2-Clause"] | 9 | 4208023020bc56dd81f6933347f9c4e7c1853318 | https://github.com/JCBrouwer/maua/tree/4208023020bc56dd81f6933347f9c4e7c1853318 |
StridedNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cx/ccxx7ogetsg5xueniw76nfwj5k7icpgxq2up6h7z7ykwrncdxbj5.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 139240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3481) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
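# Note (added): a plain bias-add epilogue for the first convolution; 3481 is
# 59 * 59, the spatial size of the conv output, so x1 recovers the channel
# index (0..9) of each element before broadcasting the bias.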
# kernel path: runs/run_shard_2/inductor_cache/mp/cmp52m5iig3sqzbdulh7bdqonhjqronc6uq65hjmyfdwmapy7zuz.py
# Topologically Sorted Source Nodes: [max_pool2d, out], Original ATen: [aten.max_pool2d_with_indices, aten.gelu, aten.gelu_backward]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1
# out => add, erf, mul, mul_1, mul_2
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [1, 1], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %getitem), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_35, -0.5), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_36,), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, 0.3989422804014327), kwargs = {})
# %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %mul_37), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_34, %mul_38), kwargs = {})
triton_poi_fused_gelu_gelu_backward_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_gelu_gelu_backward_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_gelu_backward_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_gelu_backward_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 134560
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 58
x1 = (xindex // 58) % 58
x2 = (xindex // 3364)
x3 = xindex % 3364
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (59*x1) + (3481*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (59*x1) + (3481*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (59 + x0 + (59*x1) + (3481*x2)), xmask)
tmp12 = tl.load(in_ptr0 + (60 + x0 + (59*x1) + (3481*x2)), xmask)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = 0.7071067811865476
tmp20 = tmp16 * tmp19
tmp21 = libdevice.erf(tmp20)
tmp22 = 1.0
tmp23 = tmp21 + tmp22
tmp24 = tmp18 * tmp23
tmp25 = tmp23 * tmp17
tmp26 = tmp16 * tmp16
tmp27 = -0.5
tmp28 = tmp26 * tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 0.3989422804014327
tmp31 = tmp29 * tmp30
tmp32 = tmp16 * tmp31
tmp33 = tmp25 + tmp32
tl.store(out_ptr0 + (x3 + (3456*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x4), tmp24, xmask)
tl.store(out_ptr2 + (x3 + (3392*x2)), tmp33, xmask)
''', device_str='cuda')
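# Note (added): tmp24 is the exact GELU of the pooled maximum,
# 0.5 * x * (1 + erf(x / sqrt(2))), and tmp33 is its derivative
# Phi(x) + x * phi(x); 0.3989422804014327 is 1 / sqrt(2 * pi), so the third
# output caches the gradient multiplier for the backward pass.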
# kernel path: runs/run_shard_2/inductor_cache/s7/cs7jolrbmxdflbljqraqf52j24ad3etms6misoctl4w3yzoi7rh4.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_2, %primals_4, %primals_5, [1, 1], [0, 0], [2, 2], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 216320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 2704) % 20
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/pf/cpfhsgs7dnpwmh4s7zqybx2meoq5iqr3sy5sysoa7issfwh3uekp.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.gelu, aten.gelu_backward]
# Source node to ATen node mapping:
# out_1 => add_1, erf_1, mul_3, mul_4, mul_5
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_2, 0.5), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_2, 0.7071067811865476), kwargs = {})
# %erf_1 : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_4,), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf_1, 1), kwargs = {})
# %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %add_1), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_2, %getitem_2), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_28, -0.5), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_29,), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_2, 0.3989422804014327), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_2, %mul_30), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_27, %mul_31), kwargs = {})
triton_poi_fused_gelu_gelu_backward_3 = async_compile.triton('triton_poi_fused_gelu_gelu_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_gelu_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_gelu_backward_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 200000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 2500
x2 = (xindex // 2500)
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp9 = tmp7 * tmp1
tmp10 = tmp0 * tmp0
tmp11 = -0.5
tmp12 = tmp10 * tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = 0.3989422804014327
tmp15 = tmp13 * tmp14
tmp16 = tmp0 * tmp15
tmp17 = tmp9 + tmp16
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x1 + (2528*x2)), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vj/cvjlk33rkvyrjfirv2otuliqs3wtkokfszoundd3hdaqwiltrofq.py
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.gelu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# out_2 => add_2, erf_2, mul_6, mul_7, mul_8
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_5, %primals_6, %primals_7, [1, 1], [0, 0], [4, 4], False, [0, 0], 1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.5), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.7071067811865476), kwargs = {})
# %erf_2 : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_7,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf_2, 1), kwargs = {})
# %mul_8 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %add_2), kwargs = {})
triton_poi_fused_convolution_gelu_4 = async_compile.triton('triton_poi_fused_convolution_gelu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_gelu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_gelu_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 270848
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 2116) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
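# Note (added): the convolution itself runs through extern_kernels.convolution;
# this epilogue adds the per-channel bias and applies the exact (erf-based)
# GELU, writing both the biased pre-activation (saved for autograd) and the
# activated output.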
# kernel path: runs/run_shard_2/inductor_cache/ve/cventnfn5cbotajqlomxi6ee64v5g7istz5m4q7sswqhe237y3ow.py
# Topologically Sorted Source Nodes: [conv2d_3, out_3], Original ATen: [aten.convolution, aten.gelu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# out_3 => add_3, erf_3, mul_10, mul_11, mul_9
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_8, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.5), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.7071067811865476), kwargs = {})
# %erf_3 : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_10,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf_3, 1), kwargs = {})
# %mul_11 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_9, %add_3), kwargs = {})
triton_poi_fused_convolution_gelu_5 = async_compile.triton('triton_poi_fused_convolution_gelu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_gelu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_gelu_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 135424
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 2116) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/sz/csz2kzfnlbfvemyrld64afnvaexb5uykkcjucasjwhvpxkmhygjp.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_4 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_11, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8464
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (10, 1, 6, 6), (36, 36, 6, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 4, 4), (160, 16, 4, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (32, 20, 2, 2), (80, 4, 2, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (16, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_9, (16, ), (1, ))
assert_size_stride(primals_10, (1, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 59, 59), (34810, 3481, 59, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 139240, grid=grid(139240), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 10, 58, 58), (34560, 3456, 58, 1), torch.int8)
buf3 = empty_strided_cuda((4, 10, 58, 58), (33640, 3364, 58, 1), torch.float32)
buf19 = empty_strided_cuda((4, 10, 58, 58), (33920, 3392, 58, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d, out], Original ATen: [aten.max_pool2d_with_indices, aten.gelu, aten.gelu_backward]
triton_poi_fused_gelu_gelu_backward_max_pool2d_with_indices_1.run(buf1, buf2, buf3, buf19, 134560, grid=grid(134560), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 52, 52), (54080, 2704, 52, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 216320, grid=grid(216320), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices]
buf6 = torch.ops.aten.max_pool2d_with_indices.default(buf5, [2, 2], [1, 1], [0, 0], [2, 2])
buf7 = buf6[0]
buf8 = buf6[1]
del buf6
buf9 = empty_strided_cuda((4, 20, 50, 50), (50000, 2500, 50, 1), torch.float32)
buf18 = empty_strided_cuda((4, 20, 50, 50), (50560, 2528, 50, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.gelu, aten.gelu_backward]
triton_poi_fused_gelu_gelu_backward_3.run(buf7, buf9, buf18, 200000, grid=grid(200000), stream=stream0)
del buf7
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 46, 46), (67712, 2116, 46, 1))
buf11 = buf10; del buf10 # reuse
buf12 = empty_strided_cuda((4, 32, 46, 46), (67712, 2116, 46, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.gelu]
triton_poi_fused_convolution_gelu_4.run(buf11, primals_7, buf12, 270848, grid=grid(270848), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 16, 46, 46), (33856, 2116, 46, 1))
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((4, 16, 46, 46), (33856, 2116, 46, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, out_3], Original ATen: [aten.convolution, aten.gelu]
triton_poi_fused_convolution_gelu_5.run(buf14, primals_9, buf15, 135424, grid=grid(135424), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 1, 46, 46), (2116, 2116, 46, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf17, primals_11, 8464, grid=grid(8464), stream=stream0)
del primals_11
return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf2, buf3, buf5, buf8, buf9, buf11, buf12, buf14, buf15, buf18, buf19, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 1, 6, 6), (36, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 10, 4, 4), (160, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 20, 2, 2), (80, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class StridedNet(nn.Module):
def __init__(self):
super(StridedNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=10,
            kernel_size=6, stride=1, dilation=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=1, dilation=1)
        self.conv2 = nn.Conv2d(in_channels=10, out_channels=20,
            kernel_size=4, stride=1, dilation=2)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=1, dilation=2)
self.fc1 = nn.Conv2d(in_channels=20, out_channels=32, kernel_size=2,
stride=1, dilation=4)
self.fc2 = nn.Conv2d(in_channels=32, out_channels=16, kernel_size=1,
stride=1, dilation=1)
self.fc3 = nn.Conv2d(in_channels=16, out_channels=1, kernel_size=1,
stride=1, dilation=1)
def forward(self, x):
out = F.gelu(self.pool1(self.conv1(x)))
out = F.gelu(self.pool2(self.conv2(out)))
out = F.gelu(self.fc1(out))
out = F.gelu(self.fc2(out))
out = self.fc3(out)
return out
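# Spatial-size trace (added for clarity) for the 64x64 inputs below; every layer
# runs at stride 1, so each one trims the border by its effective kernel size - 1:
#   conv1 (k=6):          64 -> 59
#   pool1 (k=2):          59 -> 58
#   conv2 (k=4, dil=2):   58 -> 52   (effective kernel 2*(4-1)+1 = 7)
#   pool2 (k=2, dil=2):   52 -> 50   (effective kernel 3)
#   fc1   (k=2, dil=4):   50 -> 46   (effective kernel 5)
#   fc2, fc3 (k=1):       46 -> 46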
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 139240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3481 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_gelu_gelu_backward_max_pool2d_with_indices_1(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 134560
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 58
x1 = xindex // 58 % 58
x2 = xindex // 3364
x3 = xindex % 3364
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 59 * x1 + 3481 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 59 * x1 + 3481 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (59 + x0 + 59 * x1 + 3481 * x2), xmask)
tmp12 = tl.load(in_ptr0 + (60 + x0 + 59 * x1 + 3481 * x2), xmask)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = 0.7071067811865476
tmp20 = tmp16 * tmp19
tmp21 = libdevice.erf(tmp20)
tmp22 = 1.0
tmp23 = tmp21 + tmp22
tmp24 = tmp18 * tmp23
tmp25 = tmp23 * tmp17
tmp26 = tmp16 * tmp16
tmp27 = -0.5
tmp28 = tmp26 * tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 0.3989422804014327
tmp31 = tmp29 * tmp30
tmp32 = tmp16 * tmp31
tmp33 = tmp25 + tmp32
tl.store(out_ptr0 + (x3 + 3456 * x2), tmp15, xmask)
tl.store(out_ptr1 + x4, tmp24, xmask)
tl.store(out_ptr2 + (x3 + 3392 * x2), tmp33, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 216320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 2704 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_gelu_gelu_backward_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 200000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 2500
x2 = xindex // 2500
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp9 = tmp7 * tmp1
tmp10 = tmp0 * tmp0
tmp11 = -0.5
tmp12 = tmp10 * tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = 0.3989422804014327
tmp15 = tmp13 * tmp14
tmp16 = tmp0 * tmp15
tmp17 = tmp9 + tmp16
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + (x1 + 2528 * x2), tmp17, xmask)
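# Reference sketch (added for clarity; not part of the generated module): the two
# stores above are the exact erf-based GELU and its derivative. The magic
# constants are 1/sqrt(2) = 0.7071067811865476 and 1/sqrt(2*pi) = 0.3989422804014327:
import math

def _gelu_reference(x):
    """Forward: 0.5 * x * (1 + erf(x / sqrt(2)))."""
    return 0.5 * x * (1.0 + math.erf(x * 0.7071067811865476))

def _gelu_grad_reference(x):
    """Derivative: 0.5 * (1 + erf(x / sqrt(2))) + x * exp(-x**2 / 2) / sqrt(2 * pi)."""
    return (0.5 * (1.0 + math.erf(x * 0.7071067811865476))
            + x * math.exp(-0.5 * x * x) * 0.3989422804014327)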
@triton.jit
def triton_poi_fused_convolution_gelu_4(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 270848
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 2116 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_gelu_5(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 135424
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 2116 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 8464
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (10, 1, 6, 6), (36, 36, 6, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 4, 4), (160, 16, 4, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (32, 20, 2, 2), (80, 4, 2, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (16, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (1, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 59, 59), (34810, 3481, 59, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(139240)](buf1, primals_2,
139240, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 10, 58, 58), (34560, 3456, 58, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 10, 58, 58), (33640, 3364, 58, 1),
torch.float32)
buf19 = empty_strided_cuda((4, 10, 58, 58), (33920, 3392, 58, 1),
torch.float32)
        triton_poi_fused_gelu_gelu_backward_max_pool2d_with_indices_1[grid(134560)](
            buf1, buf2, buf3, buf19, 134560, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 52, 52), (54080, 2704, 52, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(216320)](buf5, primals_5,
216320, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf6 = torch.ops.aten.max_pool2d_with_indices.default(buf5, [2, 2],
[1, 1], [0, 0], [2, 2])
buf7 = buf6[0]
buf8 = buf6[1]
del buf6
buf9 = empty_strided_cuda((4, 20, 50, 50), (50000, 2500, 50, 1),
torch.float32)
buf18 = empty_strided_cuda((4, 20, 50, 50), (50560, 2528, 50, 1),
torch.float32)
triton_poi_fused_gelu_gelu_backward_3[grid(200000)](buf7, buf9,
buf18, 200000, XBLOCK=1024, num_warps=4, num_stages=1)
del buf7
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(4, 4), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 46, 46), (67712, 2116, 46, 1))
buf11 = buf10
del buf10
buf12 = empty_strided_cuda((4, 32, 46, 46), (67712, 2116, 46, 1),
torch.float32)
triton_poi_fused_convolution_gelu_4[grid(270848)](buf11, primals_7,
buf12, 270848, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf13 = extern_kernels.convolution(buf12, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 16, 46, 46), (33856, 2116, 46, 1))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 16, 46, 46), (33856, 2116, 46, 1),
torch.float32)
triton_poi_fused_convolution_gelu_5[grid(135424)](buf14, primals_9,
buf15, 135424, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 1, 46, 46), (2116, 2116, 46, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_6[grid(8464)](buf17, primals_11, 8464,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf2, buf3, buf5, buf8, buf9, buf11, buf12, buf14,
buf15, buf18, buf19)
class StridedNetNew(nn.Module):
def __init__(self):
super(StridedNetNew, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=10,
            kernel_size=6, stride=1, dilation=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=1, dilation=1)
        self.conv2 = nn.Conv2d(in_channels=10, out_channels=20,
            kernel_size=4, stride=1, dilation=2)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=1, dilation=2)
self.fc1 = nn.Conv2d(in_channels=20, out_channels=32, kernel_size=2,
stride=1, dilation=4)
self.fc2 = nn.Conv2d(in_channels=32, out_channels=16, kernel_size=1,
stride=1, dilation=1)
self.fc3 = nn.Conv2d(in_channels=16, out_channels=1, kernel_size=1,
stride=1, dilation=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
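# Usage sketch (added; mirrors get_inputs above): the wrapper is a drop-in module,
# e.g. `StridedNetNew().cuda()(torch.rand(4, 1, 64, 64, device='cuda'))`.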
| JHorcasitas/cnn_document_binarization | StridedNet | false | 17,466 | ["MIT"] | 9 | 075f76aed375ca14a53011f4dfeb12379debb5b3 | https://github.com/JHorcasitas/cnn_document_binarization/tree/075f76aed375ca14a53011f4dfeb12379debb5b3 |
ResidualBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/tt/ctthexrcgmoykvsyasq7xirwxi6m3yxgjocmuvarikaawgqvdiws.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hc/chcdx6ttmlttffwusj5w6vjo2khj6mg5rgpxcpvcjyf4xor54xwb.py
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# instance_norm => add, rsqrt, var_mean
# out_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5g/c5gxks3yqvci4quqbwwhect6gkb5urcmnqe6r5cio2in2vodlfqy.py
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# instance_norm => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/zo/czo4w4jiravqnnhuvvpxl7kim6nsxjmv6ck2k3djnldqrfvz45ss.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_2 => relu
# out_3 => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/nq/cnqhut6jx7bqfau7k3dcfrfo64swt4wygolskbjjrzrdclfhydfl.py
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# out_4 => convolution_1
# out_5 => add_2, repeat_2, rsqrt_1, var_mean_1
# out_6 => add_4
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + (16*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + (16*x0)), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + (16*x0)), tmp3, xmask)
tl.store(out_ptr3 + (r3 + (16*x0)), tmp31, xmask)
tl.store(out_ptr4 + (x0), tmp25, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf8, primals_3, buf5, 16, 16, grid=grid(16), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_4, buf3, 16, grid=grid(16), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_5, buf4, 16, grid=grid(16), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_3.run(buf2, buf5, buf8, buf3, buf4, buf9, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf11 = buf10; del buf10 # reuse
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4.run(buf11, primals_8, primals_7, primals_9, primals_1, buf12, buf13, buf17, buf16, 16, 16, grid=grid(16), stream=stream0)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, reinterpret_tensor(buf16, (16, ), (1, ), 0), reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x, in_weights=None):
residual = x
if in_weights is None:
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
else:
out = self.conv1(x)
out = F.instance_norm(out, weight=in_weights['in1.weight'],
bias=in_weights['in1.bias'])
out = self.relu(out)
out = self.conv2(out)
out = F.instance_norm(out, weight=in_weights['in2.weight'],
bias=in_weights['in2.bias'])
out = out + residual
return out
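# Note (added): the compiled graph below corresponds to the in_weights=None branch;
# the explicit F.instance_norm branch only exists so externally supplied affine
# weights can be swapped in without touching the module's own parameters.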
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
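# Reference sketch (added; assumes the 4x4 -> 6x6 case compiled here): the load
# index above is ReflectionPad2d(1) in disguise. For an output coordinate p in
# [0, 6), the source coordinate along either axis is 3 - abs(abs(p - 1) - 3); the
# kernel folds the row/column offsets into one flattened index per 4x4 plane.
def _reflect_index(p):
    return 3 - abs(abs(p - 1) - 3)

assert [_reflect_index(p) for p in range(6)] == [1, 0, 1, 2, 3, 2]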
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
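# Note (added): each of the 16 programs above reduces one (batch, channel) plane
# of 4x4 = 16 pixels, emitting the per-instance mean and rsqrt(var + 1e-05) that
# InstanceNorm2d needs; the conv1 bias add is fused into the same pass.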
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + 16 * x0), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask)
tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask)
tl.store(out_ptr4 + x0, tmp25, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
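# Note (added): this one persistent-reduction kernel fuses four graph ops: the
# conv2 bias add, the repeat of the affine weight across the batch, the
# instance-norm statistics and normalization, and the residual add of the block
# input (in_ptr3) into out_ptr3.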
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf6
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf8, primals_3, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_relu_3[grid(576)](buf2, buf5,
buf8, buf3, buf4, buf9, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16,), (1,), torch.float32)
buf11 = buf10
del buf10
        buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4[grid(16)](
            buf11, primals_8, primals_7, primals_9, primals_1, buf12,
            buf13, buf17, buf16, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8,
buf9, buf11, buf12, reinterpret_tensor(buf16, (16,), (1,), 0),
reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0))
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlockNew(torch.nn.Module):
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv1.conv2d.weight
primals_3 = self.conv1.conv2d.bias
primals_4 = self.in1.weight
primals_5 = self.in1.bias
primals_6 = self.conv2.conv2d.weight
primals_7 = self.conv2.conv2d.bias
primals_8 = self.in2.weight
primals_9 = self.in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| JEF1056/Reconstruction-Style | ResidualBlock | false | 17,467 | ["MIT"] | 6 | 3430d9e9f05c6980ae251cf15b619148a2c899d6 | https://github.com/JEF1056/Reconstruction-Style/tree/3430d9e9f05c6980ae251cf15b619148a2c899d6 |
BoundaryDiscriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/xr/cxr6gmimv3jiv6zdozcrmjqpwe2tyqhehgtzc34gfyronwa3imaq.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
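# Reference sketch (added for clarity, not generated by Inductor): every fused
# kernel in this discriminator computes LeakyReLU with negative_slope=0.2 and
# also stores the x > 0 mask that the backward pass reuses:
def _leaky_relu_reference(x, slope=0.2):
    mask = x > 0.0                                # saved to out_ptr0 as a bool mask
    return mask, torch.where(mask, x, x * slope)  # value saved to out_ptr1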
# kernel path: runs/run_shard_2/inductor_cache/il/cilhfzazlplgany6lvvwipagfi6u3k53riuh7sqquy7dpwlzabdr.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4v/c4vfxmxymmvy5pzdrclzuzzy55wlfxekcs4griocefopzzy7s4lt.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/uy/cuywog7v4jbxtzmdugix5vclzfytkx54dmd6fi45uf6y6wd6yz3i.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_3, mul_3, where_3
# Graph fragment:
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, None)
tl.store(out_ptr1 + (x0), tmp5, None)
''', device_str='cuda')
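# Note: unlike the earlier kernels, the mask above is constant True, presumably
# because 51200 divides evenly by the XBLOCK sizes considered here (e.g.
# 51200 = 50 * 1024), so the per-element bounds check can be dropped.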
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, buf1, buf2, 278784, grid=grid(278784), stream=stream0)
del buf0
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, buf4, buf5, 147968, grid=grid(147968), stream=stream0)
del buf3
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool)
buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf6, buf7, buf8, 82944, grid=grid(82944), stream=stream0)
del buf6
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_3.run(buf9, buf10, buf11, 51200, grid=grid(51200), stream=stream0)
del buf9
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BoundaryDiscriminator(nn.Module):
def __init__(self):
super(BoundaryDiscriminator, self).__init__()
filter_num_list = [64, 128, 256, 512, 1]
self.conv1 = nn.Conv2d(1, filter_num_list[0], kernel_size=4, stride
=2, padding=2, bias=False)
self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.leakyrelu(self.conv1(x))
x = self.leakyrelu(self.conv2(x))
x = self.leakyrelu(self.conv3(x))
x = self.leakyrelu(self.conv4(x))
x = self.conv5(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
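# Shape sanity sketch (hypothetical helper, not part of the repo): every conv
# uses kernel_size=4, stride=2, padding=2, so H_out = (H_in + 2*2 - 4) // 2 + 1
# = H_in // 2 + 1, which yields 64 -> 33 -> 17 -> 9 -> 5 -> 3, matching the
# buffer shapes asserted in call() above.
def _expected_spatial_sizes(h=64, layers=5):
    sizes = [h]
    for _ in range(layers):
        h = (h + 2 * 2 - 4) // 2 + 1  # generic conv output-size formula
        sizes.append(h)
    return sizes  # [64, 33, 17, 9, 5, 3]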
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # every generated index is in bounds here, so no mask is materialized
    # (the loads and stores below pass mask=None)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, None)
tl.store(out_ptr1 + x0, tmp5, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
2), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(278784)](buf0, buf1, buf2,
278784, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.float32)
triton_poi_fused_leaky_relu_1[grid(147968)](buf3, buf4, buf5,
147968, XBLOCK=1024, num_warps=4, num_stages=1)
del buf3
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool
)
buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.
float32)
triton_poi_fused_leaky_relu_2[grid(82944)](buf6, buf7, buf8, 82944,
XBLOCK=1024, num_warps=4, num_stages=1)
del buf6
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch
.bool)
buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch
.float32)
triton_poi_fused_leaky_relu_3[grid(51200)](buf9, buf10, buf11,
51200, XBLOCK=512, num_warps=4, num_stages=1)
del buf9
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11)
class BoundaryDiscriminatorNew(nn.Module):
def __init__(self):
super(BoundaryDiscriminatorNew, self).__init__()
filter_num_list = [64, 128, 256, 512, 1]
self.conv1 = nn.Conv2d(1, filter_num_list[0], kernel_size=4, stride
=2, padding=2, bias=False)
self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
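# Example usage (illustrative):
#   model = BoundaryDiscriminatorNew().cuda()
#   logits = model(torch.rand(4, 1, 64, 64, device='cuda'))  # shape (4, 1, 3, 3)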
| JACKYLUO1991/DCBNet | BoundaryDiscriminator | false | 17,468 | ["MIT"] | 6 | b797584b66ad99fe984f58268befb12ec60ccfae | https://github.com/JACKYLUO1991/DCBNet/tree/b797584b66ad99fe984f58268befb12ec60ccfae |
MaskDiscriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/xr/cxr6gmimv3jiv6zdozcrmjqpwe2tyqhehgtzc34gfyronwa3imaq.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/il/cilhfzazlplgany6lvvwipagfi6u3k53riuh7sqquy7dpwlzabdr.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4v/c4vfxmxymmvy5pzdrclzuzzy55wlfxekcs4griocefopzzy7s4lt.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/uy/cuywog7v4jbxtzmdugix5vclzfytkx54dmd6fi45uf6y6wd6yz3i.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_3, mul_3, where_3
# Graph fragment:
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, None)
tl.store(out_ptr1 + (x0), tmp5, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (4, 2, 64, 64), (8192, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (2, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, buf1, buf2, 278784, grid=grid(278784), stream=stream0)
del buf0
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, buf4, buf5, 147968, grid=grid(147968), stream=stream0)
del buf3
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool)
buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf6, buf7, buf8, 82944, grid=grid(82944), stream=stream0)
del buf6
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_3.run(buf9, buf10, buf11, 51200, grid=grid(51200), stream=stream0)
del buf9
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 2, 3, 3), (18, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, )
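    # Launch accounting (illustrative): grid(278784) with XBLOCK=1024 (the value
    # pinned in the standalone variant below) covers ceil(278784 / 1024) = 273
    # programs; the partial tail program is why these kernels keep an xmask.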
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 2, 64, 64), (8192, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaskDiscriminator(nn.Module):
def __init__(self):
super(MaskDiscriminator, self).__init__()
filter_num_list = [64, 128, 256, 512, 2]
self.conv1 = nn.Conv2d(2, filter_num_list[0], kernel_size=4, stride
=2, padding=2, bias=False)
self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.leakyrelu(self.conv1(x))
x = self.leakyrelu(self.conv2(x))
x = self.leakyrelu(self.conv3(x))
x = self.leakyrelu(self.conv4(x))
x = self.conv5(x)
return x
def get_inputs():
return [torch.rand([4, 2, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # every generated index is in bounds here, so no mask is materialized
    # (the loads and stores below pass mask=None)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, None)
tl.store(out_ptr1 + x0, tmp5, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (4, 2, 64, 64), (8192, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (2, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
2), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(278784)](buf0, buf1, buf2,
278784, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.float32)
triton_poi_fused_leaky_relu_1[grid(147968)](buf3, buf4, buf5,
147968, XBLOCK=1024, num_warps=4, num_stages=1)
del buf3
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool
)
buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.
float32)
triton_poi_fused_leaky_relu_2[grid(82944)](buf6, buf7, buf8, 82944,
XBLOCK=1024, num_warps=4, num_stages=1)
del buf6
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch
.bool)
buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch
.float32)
triton_poi_fused_leaky_relu_3[grid(51200)](buf9, buf10, buf11,
51200, XBLOCK=512, num_warps=4, num_stages=1)
del buf9
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 2, 3, 3), (18, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11)
class MaskDiscriminatorNew(nn.Module):
def __init__(self):
super(MaskDiscriminatorNew, self).__init__()
filter_num_list = [64, 128, 256, 512, 2]
self.conv1 = nn.Conv2d(2, filter_num_list[0], kernel_size=4, stride
=2, padding=2, bias=False)
self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| JACKYLUO1991/DCBNet | MaskDiscriminator | false | 17,469 | ["MIT"] | 6 | b797584b66ad99fe984f58268befb12ec60ccfae | https://github.com/JACKYLUO1991/DCBNet/tree/b797584b66ad99fe984f58268befb12ec60ccfae |
StrongMask | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ch/cchvvxck55hnihnwiy5ersitpzcieftrte37kxl5gwnhidimatdc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
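# Layout prologue (illustrative reading): with ynumel = 65536 = 256 * 256 and
# xnumel = 9 = 3 * 3, this apparently re-strides a (256, 256, 3, 3) weight so
# the nine spatial taps land at stride-256 offsets, a channels-last style
# layout that the downstream convolution kernels consume.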
# kernel path: runs/run_shard_2/inductor_cache/o4/co4pmatq2ocbyjariiddjrqof4cpeh7oi3ha4nqoeswhmgbzaqnl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 4
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (1024*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/cq/ccqmk7hvw4hf5vfetrd2dk3vojlwtldmtelvszzrqrssjf47qzlp.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# pad => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_constant_pad_nd_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = (xindex // 6)
x2 = xindex % 6
y4 = yindex
x5 = xindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = (-1) + x3
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x2
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x2 + (4*x3) + (16*y4)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (y0 + (256*x5) + (9216*y1)), tmp11, xmask)
''', device_str='cuda')
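# Index walkthrough: output pixel (row x3, col x2) of the 6x6 padded plane
# reads input pixel (x3 - 1, x2 - 1); the tmp2..tmp10 chain builds the
# 0 <= . < 4 bounds predicate, `other=0.0` supplies the zero fill outside the
# original 4x4 plane, and the store re-strides into the channels-last layout
# (y0 + 256*x5 + 9216*y1) used by the following convolution.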
# kernel path: runs/run_shard_2/inductor_cache/4n/c4n5t4codrcco6pnxjke5x7wjkabgttqbolgz4rrndrt2y6hlnyl.py
# Topologically Sorted Source Nodes: [x, x_1, pad_1], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# pad_1 => constant_pad_nd_1
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_relu_3 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 1536) % 6
x1 = (xindex // 256) % 6
x3 = (xindex // 9216)
x4 = xindex % 1536
x0 = xindex % 256
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-1280) + x4 + (1024*x2) + (4096*x3)), tmp10, other=0.0)
tmp12 = tl.load(in_ptr1 + (x0), tmp10, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + (x6), tmp17, None)
''', device_str='cuda')
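# This kernel fuses three graph nodes in one pass over the padded layout: the
# per-channel bias add of the preceding convolution (tmp13), the ReLU via
# maximum(0, .) (tmp15), and the zero fill of the padded border via tl.where
# over the bounds predicate (tmp17).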
# kernel path: runs/run_shard_2/inductor_cache/gc/cgctjlgnbq4wfwdwoevcpfb7vf3b2chjk7tvdxs3kk4uy3k2jnp7.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_6 => convolution_3
# x_7 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_3, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/u4/cu4twncn4uucspgmszqo247anmlvovjl42fqj2jujbhs2zzcz4j6.py
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_8 => convolution_4
# x_9 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/sx/csxsedzaaoaykhwk75c2bs4hvhdctmku2a5u7esl3tky353r5sb2.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# x_10 => convolution_5
# x_11 => sigmoid
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_5,), kwargs = {})
triton_poi_fused_convolution_sigmoid_6 = async_compile.triton('triton_poi_fused_convolution_sigmoid_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 64], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 84
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 21
y1 = (yindex // 21)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (21*x2) + (1344*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + (64*y3)), tmp3, xmask & ymask)
''', device_str='cuda')
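# Editorial sketch (not Inductor output): besides the bias + sigmoid epilogue,
# the kernel above permutes the channels-last convolution output (strides
# (1344, 1, 168, 21)) into the contiguous NCHW result buffer (strides
# (1344, 64, 8, 1)). A rough eager-mode equivalent, with the illustrative
# helper name _reference_bias_sigmoid_contiguous:
def _reference_bias_sigmoid_contiguous(conv_out, bias):
    # conv_out: (N, 21, 8, 8) in channels-last memory layout; bias: (21,).
    return torch.sigmoid(conv_out + bias.view(1, -1, 1, 1)).contiguous()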
# kernel path: runs/run_shard_2/inductor_cache/lq/clqy6dmcwdz3hicr3k2jrifvmhgcmenfvdhylbfsg2obsgcjluqf.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_4 => convolution_2
# x_5 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
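# Editorial sketch (not Inductor output): this kernel recomputes
# relu(conv + bias) and stores only the boolean mask (output <= 0) that the
# ReLU backward pass uses to zero incoming gradients. Eager-mode equivalent,
# with the illustrative helper name _reference_relu_backward_mask:
def _reference_relu_backward_mask(conv_out, bias):
    # True where the forward ReLU clamped the activation to zero.
    return torch.relu(conv_out + bias.view(1, -1, 1, 1)) <= 0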
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (256, 256, 2, 2), (1024, 4, 2, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (21, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_13, (21, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_2, buf0, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_4, buf1, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_6, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_6
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_8, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_8
buf4 = empty_strided_cuda((256, 256, 2, 2), (1024, 1, 512, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_10, buf4, 65536, 4, grid=grid(65536, 4), stream=stream0)
del primals_10
buf5 = empty_strided_cuda((4, 256, 6, 6), (9216, 1, 1536, 256), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_2.run(primals_1, buf5, 1024, 36, grid=grid(1024, 36), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = empty_strided_cuda((4, 256, 6, 6), (9216, 1, 1536, 256), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1, pad_1], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_3.run(buf6, primals_3, buf7, 36864, grid=grid(36864), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf9 = empty_strided_cuda((4, 256, 6, 6), (9216, 1, 1536, 256), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3, pad_2], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_3.run(buf8, primals_5, buf9, 36864, grid=grid(36864), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf11 = empty_strided_cuda((4, 256, 6, 6), (9216, 1, 1536, 256), torch.float32)
# Topologically Sorted Source Nodes: [x_4, x_5, pad_3], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_3.run(buf10, primals_7, buf11, 36864, grid=grid(36864), stream=stream0)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_9, 16384, grid=grid(16384), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf15, primals_11, 65536, grid=grid(65536), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 21, 8, 8), (1344, 1, 168, 21))
buf17 = empty_strided_cuda((4, 21, 8, 8), (1344, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_6.run(buf16, primals_13, buf17, 84, 64, grid=grid(84, 64), stream=stream0)
del buf16
del primals_13
buf18 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool)
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf10, primals_7, buf18, 16384, grid=grid(16384), stream=stream0)
del buf10
del primals_7
buf19 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf8, primals_5, buf19, 16384, grid=grid(16384), stream=stream0)
del buf8
del primals_5
buf20 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf6, primals_3, buf20, 16384, grid=grid(16384), stream=stream0)
del buf6
del primals_3
return (buf17, buf0, buf1, buf2, buf3, buf4, primals_12, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf18, buf19, buf20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 256, 2, 2), (1024, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((21, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((21, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class SamePad2dStrong(nn.Module):
"""Mimics tensorflow's 'SAME' padding.
"""
def __init__(self, kernel_size, stride):
super(SamePad2dStrong, self).__init__()
self.kernel_size = torch.nn.modules.utils._pair(kernel_size)
self.stride = torch.nn.modules.utils._pair(stride)
    def forward(self, input):
        # NCHW note: size()[2] is the height and size()[3] is the width, so
        # the two names below are swapped; with kernel_size=3 and stride=1 the
        # total padding is always 2 per axis, so the swap is harmless here.
        in_width = input.size()[2]
        in_height = input.size()[3]
        out_width = math.ceil(float(in_width) / float(self.stride[0]))
        out_height = math.ceil(float(in_height) / float(self.stride[1]))
        pad_along_width = ((out_width - 1) * self.stride[0] +
            self.kernel_size[0] - in_width)
        pad_along_height = ((out_height - 1) * self.stride[1] +
            self.kernel_size[1] - in_height)
        pad_left = math.floor(pad_along_width / 2)
        pad_top = math.floor(pad_along_height / 2)
        pad_right = pad_along_width - pad_left
        pad_bottom = pad_along_height - pad_top
        return F.pad(input, (pad_left, pad_right, pad_top, pad_bottom),
            'constant', 0)
def __repr__(self):
return self.__class__.__name__
class StrongMask(nn.Module):
def __init__(self, depth=256, pool_size=14, num_classes=21):
super(StrongMask, self).__init__()
self.depth = depth
self.pool_size = pool_size
self.num_classes = num_classes
self.padding = SamePad2dStrong(kernel_size=3, stride=1)
self.conv1 = nn.Conv2d(self.depth, 256, kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.conv3 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.deconv = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2)
self.conv5 = nn.Conv2d(256, num_classes, kernel_size=1, stride=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(self.padding(x))
x = self.relu(x)
x = self.conv2(self.padding(x))
x = self.relu(x)
x = self.conv3(self.padding(x))
x = self.relu(x)
x = self.conv4(self.padding(x))
x = self.relu(x)
x = self.deconv(x)
x = self.relu(x)
x = self.conv5(x)
x = self.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 256, 4, 4])]
def get_init_inputs():
return [[], {}]
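# Editorial usage sketch (assumes a CUDA device; not part of the original repo
# code): exercising the eager StrongMask head on the sample shape used
# throughout this entry.
#
#     model = StrongMask(depth=256, num_classes=21).cuda()
#     x = torch.rand([4, 256, 4, 4], device='cuda')
#     masks = model(x)  # (4, 21, 8, 8), values in (0, 1) after the sigmoid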
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex // 6
x2 = xindex % 6
y4 = yindex
x5 = xindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = -1 + x3
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x2
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x2 + 4 * x3 + 16 * y4), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (y0 + 256 * x5 + 9216 * y1), tmp11, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 1536 % 6
x1 = xindex // 256 % 6
x3 = xindex // 9216
x4 = xindex % 1536
x0 = xindex % 256
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-1280 + x4 + 1024 * x2 + 4096 * x3), tmp10,
other=0.0)
tmp12 = tl.load(in_ptr1 + x0, tmp10, eviction_policy='evict_last',
other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + x6, tmp17, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_6(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 84
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 21
y1 = yindex // 21
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 21 * x2 + 1344 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + 64 * y3), tmp3, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (256, 256, 2, 2), (1024, 4, 2, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (21, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_13, (21,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(65536, 9)](primals_2, buf0, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_0[grid(65536, 9)](primals_4, buf1, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_0[grid(65536, 9)](primals_6, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_0[grid(65536, 9)](primals_8, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf4 = empty_strided_cuda((256, 256, 2, 2), (1024, 1, 512, 256),
torch.float32)
triton_poi_fused_1[grid(65536, 4)](primals_10, buf4, 65536, 4,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf5 = empty_strided_cuda((4, 256, 6, 6), (9216, 1, 1536, 256),
torch.float32)
triton_poi_fused_constant_pad_nd_2[grid(1024, 36)](primals_1, buf5,
1024, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf6 = extern_kernels.convolution(buf5, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = empty_strided_cuda((4, 256, 6, 6), (9216, 1, 1536, 256),
torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_3[grid(36864)](buf6,
primals_3, buf7, 36864, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf9 = empty_strided_cuda((4, 256, 6, 6), (9216, 1, 1536, 256),
torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_3[grid(36864)](buf8,
primals_5, buf9, 36864, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, buf2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf11 = empty_strided_cuda((4, 256, 6, 6), (9216, 1, 1536, 256),
torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_3[grid(36864)](buf10,
primals_7, buf11, 36864, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(16384)](buf13, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf14 = extern_kernels.convolution(buf13, buf4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_5[grid(65536)](buf15, primals_11,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf16 = extern_kernels.convolution(buf15, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 21, 8, 8), (1344, 1, 168, 21))
        buf17 = empty_strided_cuda((4, 21, 8, 8), (1344, 64, 8, 1),
            torch.float32)
triton_poi_fused_convolution_sigmoid_6[grid(84, 64)](buf16,
primals_13, buf17, 84, 64, XBLOCK=64, YBLOCK=4, num_warps=4,
num_stages=1)
del buf16
del primals_13
buf18 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(16384)](
buf10, primals_7, buf18, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf10
del primals_7
buf19 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(16384)](
buf8, primals_5, buf19, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf8
del primals_5
buf20 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(16384)](
buf6, primals_3, buf20, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_3
return (buf17, buf0, buf1, buf2, buf3, buf4, primals_12, buf5, buf7,
buf9, buf11, buf13, buf15, buf17, buf18, buf19, buf20)
class SamePad2dStrong(nn.Module):
"""Mimics tensorflow's 'SAME' padding.
"""
def __init__(self, kernel_size, stride):
super(SamePad2dStrong, self).__init__()
self.kernel_size = torch.nn.modules.utils._pair(kernel_size)
self.stride = torch.nn.modules.utils._pair(stride)
    def forward(self, input):
        # NCHW note: size()[2] is the height and size()[3] is the width, so
        # the two names below are swapped; with kernel_size=3 and stride=1 the
        # total padding is always 2 per axis, so the swap is harmless here.
        in_width = input.size()[2]
        in_height = input.size()[3]
        out_width = math.ceil(float(in_width) / float(self.stride[0]))
        out_height = math.ceil(float(in_height) / float(self.stride[1]))
        pad_along_width = ((out_width - 1) * self.stride[0] +
            self.kernel_size[0] - in_width)
        pad_along_height = ((out_height - 1) * self.stride[1] +
            self.kernel_size[1] - in_height)
        pad_left = math.floor(pad_along_width / 2)
        pad_top = math.floor(pad_along_height / 2)
        pad_right = pad_along_width - pad_left
        pad_bottom = pad_along_height - pad_top
        return F.pad(input, (pad_left, pad_right, pad_top, pad_bottom),
            'constant', 0)
def __repr__(self):
return self.__class__.__name__
class StrongMaskNew(nn.Module):
def __init__(self, depth=256, pool_size=14, num_classes=21):
super(StrongMaskNew, self).__init__()
self.depth = depth
self.pool_size = pool_size
self.num_classes = num_classes
self.padding = SamePad2dStrong(kernel_size=3, stride=1)
self.conv1 = nn.Conv2d(self.depth, 256, kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.conv3 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.deconv = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2)
self.conv5 = nn.Conv2d(256, num_classes, kernel_size=1, stride=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.deconv.weight
primals_11 = self.deconv.bias
primals_12 = self.conv5.weight
primals_13 = self.conv5.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
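# Editorial note: call() returns buf17 (the sigmoid output) first, followed by
# the re-laid-out weights and saved activations an autograd backward would
# consume; StrongMaskNew.forward surfaces only output[0].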
| IssamLaradji/wisenet | StrongMask | false | 17,470 | [
"Apache-2.0"
] | 7 | 881457f5168815f5e9d03f110244842d539747a0 | https://github.com/IssamLaradji/wisenet/tree/881457f5168815f5e9d03f110244842d539747a0 |
GramMatrix | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/wj/cwjovumjyzw6sbji2jtden2f255enmkgejqtavr543fi6wsz7pww.py
# Topologically Sorted Source Nodes: [div], Original ATen: [aten.div]
# Source node to ATen node mapping:
# div => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, 64), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 0.015625
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
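# Editorial note (not Inductor output): 0.015625 == 1/64 == 1/(b*c*d) for the
# (4, 4, 4, 4) input, so this kernel is the G.div(b * c * d) step; the Gram
# product itself runs through extern_kernels.bmm in call() below. A worked
# eager check for one batch element, with illustrative names:
#
#     f = x[0].reshape(4, 16)      # (b, c*d) features for sample 0
#     G0 = (f @ f.t()) / 64        # should match buf1[0] up to rounding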
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [G], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0)
del arg0_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [div], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(buf1, 64, grid=grid(64), stream=stream0)
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GramMatrix(nn.Module):
def forward(self, input):
a, b, c, d = input.size()
features = input.view(a, b, c * d)
G = torch.bmm(features, features.transpose(1, 2))
return G.div(b * c * d)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
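# Editorial usage sketch (not part of the original repo code):
#
#     gm = GramMatrix()
#     G = gm(torch.rand([4, 4, 4, 4]))  # (4, 4, 4), one Gram matrix per sample
#     assert torch.allclose(G, G.transpose(1, 2))  # Gram matrices are symmetric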
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.015625
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(
            reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0),
            reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0),
            out=buf0)
del arg0_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_div_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf1,
class GramMatrixNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
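# Editorial note: for the (4, 4, 4, 4) contract asserted in call(),
# GramMatrixNew(x) should match the eager GramMatrix()(x) up to floating-point
# rounding in the bmm reduction order.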
| Jay2020-01/TextureGAN--Flask | GramMatrix | false | 17,471 | [
"MIT"
] | 5 | cddea505b0d66b58d58fb24435f8bae42fd5a852 | https://github.com/Jay2020-01/TextureGAN--Flask/tree/cddea505b0d66b58d58fb24435f8bae42fd5a852 |
GeneralizedDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/7m/c7m25mks7iqhmv2aiadogxxp6cpxkpgxkxelv4rozuqdg44rckt6.py
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# ground_o => sum_2
# intersection => sum_1
# mul => mul
# pred_o => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [2, 3]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [2, 3]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
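# Editorial sketch (not Inductor output): per (batch, class) slice, the kernel
# above produces the three reductions generalized Dice is built from --
# intersection = sum(target * pred), ground_o = sum(target), and
# pred_o = sum(pred), each over the spatial dims [2, 3]. Eager-mode equivalent
# with the illustrative helper name _reference_dice_sums:
def _reference_dice_sums(target, pred):
    intersection = (target * pred).sum(dim=(2, 3))
    ground_o = target.sum(dim=(2, 3))
    pred_o = pred.sum(dim=(2, 3))
    return intersection, ground_o, pred_o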
# kernel path: runs/run_shard_2/inductor_cache/tj/ctjazizwevoemcclqrkqpcb52wmnx7ev5xokw77t22zqjaxx2onu.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put.default](args = (%select, [%isinf], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = tl.full([1], 1, tl.int32)
tmp3 = tmp2 / tmp1
tmp4 = libdevice.isinf(tmp3).to(tl.int1)
tmp5 = 0.0
tmp6 = tl.where(tmp4, tmp5, tmp3)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
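# Editorial note: this kernel forms the generalized-Dice class weights
# w = 1 / ground_o**2 for batch row 0 and rewrites inf entries (classes with
# ground_o == 0, i.e. absent from the ground truth) to 0.0; a following kernel
# then substitutes the row maximum into those same positions.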
# kernel path: runs/run_shard_2/inductor_cache/f5/cf5xqgi2mcq2v5veq3dy7wpdzcdh4tgd3a7rtnniqvqy7wjeygti.py
# Topologically Sorted Source Nodes: [mul_1, w], Original ATen: [aten.mul, aten.reciprocal]
# Source node to ATen node mapping:
# mul_1 => mul_1
# w => reciprocal
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, %sum_2), kwargs = {})
# %reciprocal : [num_users=2] = call_function[target=torch.ops.aten.reciprocal.default](args = (%mul_1,), kwargs = {})
# %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%reciprocal, %index_put, 0, 0), kwargs = {})
triton_poi_fused_mul_reciprocal_2 = async_compile.triton('triton_poi_fused_mul_reciprocal_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reciprocal_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reciprocal_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp4 * tmp4
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp6 / tmp5
tmp8 = tl.where(tmp2, tmp3, tmp7)
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/4h/c4hmyfdxcbaplebs26bfcav3mub3727nx6jlp6sgll4iyrpo2bsu.py
# Topologically Sorted Source Nodes: [max_1, setitem_1], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_1 => max_1
# setitem_1 => index_put_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_4,), kwargs = {})
# %index_put_1 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_4, [%isinf], %max_1), kwargs = {})
triton_per_fused_index_put_max_3 = async_compile.triton('triton_per_fused_index_put_max_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_3', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_3(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp0 = tl.full([1, 1], 0, tl.int32)
tmp1 = tmp0 == tmp0
tmp4 = tmp3 * tmp3
tmp5 = tl.full([1, 1], 1, tl.int32)
tmp6 = tmp5 / tmp4
tmp7 = tl.where(tmp1, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = triton_helpers.max2(tmp8, 1)[:, None]
tmp11 = libdevice.isinf(tmp6).to(tl.int1)
tmp12 = tl.where(tmp11, tmp10, tmp7)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
''', device_str='cuda')
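# Editorial note: here the row maximum of the zeroed weights replaces the
# positions where 1 / ground_o**2 overflowed to inf, so empty ground-truth
# classes in batch row 0 inherit the largest finite class weight.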
# kernel path: runs/run_shard_2/inductor_cache/4f/c4fymeput2evkdqk4zlg4gsrereajxkardbxqgznl3erbrx4qn4a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %index_put_1, 0, 0), kwargs = {})
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
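# Editorial note: this kernel materializes the select_scatter into a fresh
# buffer -- row 0 is taken from the (in-place updated) first four elements of
# in_ptr0 and the remaining rows pass through unchanged.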
# kernel path: runs/run_shard_2/inductor_cache/jp/cjpduuj6nal2pd5g4mq5uklckvpl7xcorf6znekgmjqj5czl7a2r.py
# Topologically Sorted Source Nodes: [setitem_2], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_2 => full_default_1, index_put_2
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_2 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_6, [%isinf_1], %full_default_1), kwargs = {})
triton_poi_fused_index_put_lift_fresh_5 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_5', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_5(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp0 = tl.full([1], 1, tl.int32)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (4 + x0), tmp8, xmask)
''', device_str='cuda')
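# Editorial note: the same inf -> 0.0 rewrite as
# triton_poi_fused_index_put_lift_fresh_1, now for batch row 1 (offset 4 + x0);
# Inductor has unrolled the per-row index_put into one kernel pair per batch
# row instead of emitting a single batched kernel.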
# kernel path: runs/run_shard_2/inductor_cache/mp/cmpbwt4j3eqiiapxydqg64iedxyrw37rpdx3t34mb2rpacopkaib.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %index_put_2, 0, 1), kwargs = {})
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/nm/cnmvv3ciux7ohniwfer6ezf2yoozsw55ipbzhc3bvmwgt6uvqlyf.py
# Topologically Sorted Source Nodes: [max_2, setitem_3], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_2 => max_2
# setitem_3 => index_put_3
# Graph fragment:
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_7,), kwargs = {})
# %index_put_3 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_7, [%isinf_1], %max_2), kwargs = {})
triton_per_fused_index_put_max_7 = async_compile.triton('triton_per_fused_index_put_max_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_7', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_7(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (4 + r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr1 + (4 + r0), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + (tl.broadcast_to(4 + r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/p6/cp6phqe3lxxsdm5opvanw5ozgenf7l2fkor76yodajcuawslbmtg.py
# Topologically Sorted Source Nodes: [setitem_4], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_4 => full_default_2, index_put_4
# Graph fragment:
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_4 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_9, [%isinf_2], %full_default_2), kwargs = {})
triton_poi_fused_index_put_lift_fresh_8 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_8', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_8(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp0 = tl.full([1], 2, tl.int32)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (8 + x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hw/chw62ci2ybogskka7n2utjqx2atlmgnnnpcl3hbddxhd6r2r44yc.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %index_put_4, 0, 2), kwargs = {})
triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7t/c7t4lv6ve7idaxozxuq5c6il47ha6zuyaep7bxtix2ghldu3hg3m.py
# Topologically Sorted Source Nodes: [max_3, setitem_5], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_3 => max_3
# setitem_5 => index_put_5
# Graph fragment:
# %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_10,), kwargs = {})
# %index_put_5 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_10, [%isinf_2], %max_3), kwargs = {})
triton_per_fused_index_put_max_10 = async_compile.triton('triton_per_fused_index_put_max_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_10', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_10(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (8 + r0), None)
tmp9 = tl.load(in_ptr1 + (4 + r0), None)
tmp10 = tl.load(in_ptr1 + (8 + r0), None)
tmp0 = tl.full([1, 1], 2, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 1, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + (tl.broadcast_to(8 + r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/jw/cjwbtzoaquf5dpvcf5bo7p2waahbplntxvrpglbsogoqfauplo66.py
# Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_6 => full_default_3, index_put_6
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_6 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_12, [%isinf_3], %full_default_3), kwargs = {})
triton_poi_fused_index_put_lift_fresh_11 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_11', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_11(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp0 = tl.full([1], 3, tl.int32)
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (12 + x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/em/cem3owcb73xk76hmwz2kiw3dj2m7m77o3cgioah4z6zbmc5r2osz.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_6 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %index_put_6, 0, 3), kwargs = {})
triton_poi_fused_12 = async_compile.triton('triton_poi_fused_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_12(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/um/cumqedgkzwjzduj3qcqju6bpxqjfgogp2bvlyike7eei6csbpqes.py
# Topologically Sorted Source Nodes: [max_4, setitem_7], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_4 => max_4
# setitem_7 => index_put_7
# Graph fragment:
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_13,), kwargs = {})
# %index_put_7 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_13, [%isinf_3], %max_4), kwargs = {})
triton_per_fused_index_put_max_13 = async_compile.triton('triton_per_fused_index_put_max_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_13', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_13(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (12 + r0), None)
tmp9 = tl.load(in_ptr1 + (8 + r0), None)
tmp10 = tl.load(in_ptr1 + (12 + r0), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 2, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + (tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/y7/cy7oasv654bs2kbeoblr55g6wd6fzbc3n275kr5jw26svutmddnl.py
# Topologically Sorted Source Nodes: [mul_2, sum_4, mul_3, add_1, denominator, mul_4, sum_5, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# denominator => add
# f => sub
# f_1 => mean
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# sum_4 => sum_4
# sum_5 => sum_5
# truediv => div
# Graph fragment:
# %select_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_6, %index_put_7, 0, 3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %select_scatter_default_7), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 2.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1e-05), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %select_scatter_default_7), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sum_14 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sum_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sum_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (12))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp12 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (14))
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (15))
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp26 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + (4*r0), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp1 = r0
tmp2 = tl.full([1, 1], 3, tl.int32)
tmp3 = tmp1 == tmp2
tmp7 = tl.where(tmp3, tmp5, tmp6)
tmp8 = tmp0 * tmp7
tmp13 = tl.where(tmp3, tmp11, tmp12)
tmp14 = tmp9 * tmp13
tmp15 = tmp8 + tmp14
tmp20 = tl.where(tmp3, tmp18, tmp19)
tmp21 = tmp16 * tmp20
tmp22 = tmp15 + tmp21
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tmp23 * tmp27
tmp29 = tmp22 + tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp32 * tmp7
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp13
tmp38 = tmp33 + tmp37
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp20
tmp43 = tmp38 + tmp42
tmp46 = tmp44 + tmp45
tmp47 = tmp46 * tmp27
tmp48 = tmp43 + tmp47
tmp49 = 2.0
tmp50 = tmp29 * tmp49
tmp51 = 1e-05
tmp52 = tmp50 + tmp51
tmp53 = tmp48 + tmp51
tmp54 = tmp52 / tmp53
tmp55 = 1.0
tmp56 = tmp55 - tmp54
tmp57 = tl.broadcast_to(tmp56, [XBLOCK, RBLOCK])
tmp59 = tl.sum(tmp57, 1)[:, None]
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp61, None)
''', device_str='cuda')
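# The kernel above fuses the final generalized Dice reduction into a single pass. A rough
# eager-mode sketch of the same math (function name hypothetical; the smooth terms are the
# 1e-05 defaults baked into this graph):
def _reference_generalized_dice(intersection, ground_o, pred_o, w):
    # intersection, ground_o, pred_o, w are (B, C) tensors, matching buf0, buf1, buf29, buf25.
    numerator = 2.0 * (intersection * w).sum(1) + 1e-05
    denominator = ((ground_o + pred_o) * w).sum(1) + 1e-05
    return (1.0 - numerator / denominator).mean()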
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf29, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_1.run(buf1, buf2, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, w], Original ATen: [aten.mul, aten.reciprocal]
triton_poi_fused_mul_reciprocal_2.run(buf2, buf1, buf4, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_1, setitem_1], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_3.run(buf2, buf1, buf4, 1, 4, grid=grid(1), stream=stream0)
del buf2
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(buf4, buf7, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_2], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_5.run(buf4, buf7, 4, grid=grid(4), stream=stream0)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(buf7, buf11, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_2, setitem_3], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_7.run(buf7, buf4, buf11, 1, 4, grid=grid(1), stream=stream0)
buf14 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(buf11, buf14, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_4], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_8.run(buf11, buf14, 4, grid=grid(4), stream=stream0)
buf18 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(buf14, buf18, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_3, setitem_5], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_10.run(buf14, buf11, buf18, 1, 4, grid=grid(1), stream=stream0)
buf21 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(buf18, buf21, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_11.run(buf18, buf21, 4, grid=grid(4), stream=stream0)
buf25 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_12.run(buf21, buf25, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_4, setitem_7], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_13.run(buf21, buf18, buf25, 1, 4, grid=grid(1), stream=stream0)
del buf18
del buf21
buf31 = empty_strided_cuda((), (), torch.float32)
buf32 = buf31; del buf31 # reuse
# Topologically Sorted Source Nodes: [mul_2, sum_4, mul_3, add_1, denominator, mul_4, sum_5, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
triton_per_fused_add_div_mean_mul_rsub_sum_14.run(buf32, buf0, buf25, buf1, buf29, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf25
del buf29
return (buf32, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import collections
import torch
import warnings
from typing import Optional
from typing import Union
from typing import Callable
from typing import Any
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
    return isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any, ...]:
    """
    Returns a copy of `tup` with `dim` values, either shortened or padded with `pad_val` as necessary.
    """
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
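# A quick sketch of the behaviour (hypothetical values):
#   ensure_tuple_size((1, 2, 3), 2)  ->  (1, 2)      # shortened
#   ensure_tuple_size((1,), 3)       ->  (1, 0, 0)   # padded with pad_val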
def one_hot(labels: 'torch.Tensor', num_classes: 'int',
    dtype: 'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
for `num_classes` N number of classes.
Example:
For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndimension() < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
    assert sh[dim] == 1, 'labels should have a channel with length equal to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
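# Shape sketch (hypothetical values): for labels = torch.tensor([[[0, 2]]]) of shape (1, 1, 2),
# one_hot(labels, num_classes=3) returns a (1, 3, 2) tensor with result[0, 0, 0] == 1,
# result[0, 2, 1] == 1 and zeros elsewhere.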
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class Weight(Enum):
"""
See also: :py:class:`monai.losses.dice.GeneralizedDiceLoss`
"""
SQUARE = 'square'
SIMPLE = 'simple'
UNIFORM = 'uniform'
class GeneralizedDiceLoss(_Loss):
"""
Compute the generalised Dice loss defined in:
Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017.
Adapted from:
https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
"""
    def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'=False,
            sigmoid: 'bool'=False, softmax: 'bool'=False,
            other_act: 'Optional[Callable]'=None,
            w_type: 'Union[Weight, str]'=Weight.SQUARE,
            reduction: 'Union[LossReduction, str]'=LossReduction.MEAN,
            smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05,
            batch: 'bool'=False) ->None:
"""
Args:
            include_background: If False, channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
            sigmoid: If True, apply a sigmoid function to the prediction.
            softmax: If True, apply a softmax function to the prediction.
            other_act: a callable function to apply as the activation layer if neither `sigmoid`
                nor `softmax` is wanted. Defaults to ``None``; for example:
                `other_act = torch.tanh`.
w_type: {``"square"``, ``"simple"``, ``"uniform"``}
Type of function to transform ground truth volume to a weight factor. Defaults to ``"square"``.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
                Defaults to False; intersection over union is computed for each item in the batch.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
w_type = Weight(w_type)
self.w_func: 'Callable' = torch.ones_like
if w_type == Weight.SIMPLE:
self.w_func = torch.reciprocal
elif w_type == Weight.SQUARE:
self.w_func = lambda x: torch.reciprocal(x * x)
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
    def forward(self, input: 'torch.Tensor', target: 'torch.Tensor') ->torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD].
Raises:
ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
"""
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if self.softmax:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `softmax=True` ignored.')
else:
input = torch.softmax(input, 1)
if self.other_act is not None:
input = self.other_act(input)
if self.to_onehot_y:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `to_onehot_y=True` ignored.')
else:
target = one_hot(target, num_classes=n_pred_ch)
if not self.include_background:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `include_background=False` ignored.'
)
else:
target = target[:, 1:]
input = input[:, 1:]
assert target.shape == input.shape, f'ground truth has differing shape ({target.shape}) from input ({input.shape})'
reduce_axis = list(range(2, len(input.shape)))
if self.batch:
reduce_axis = [0] + reduce_axis
intersection = torch.sum(target * input, reduce_axis)
ground_o = torch.sum(target, reduce_axis)
pred_o = torch.sum(input, reduce_axis)
denominator = ground_o + pred_o
w = self.w_func(ground_o.float())
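        # Reciprocal weights are inf for empty classes: zero those entries first so torch.max
        # ignores them, then replace them with the per-item maximum of the remaining weights.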
for b in w:
infs = torch.isinf(b)
b[infs] = 0.0
b[infs] = torch.max(b)
        f: 'torch.Tensor' = 1.0 - (2.0 * (intersection * w).sum(1) + self.smooth_nr) / (
            (denominator * w).sum(1) + self.smooth_dr)
if self.reduction == LossReduction.MEAN.value:
f = torch.mean(f)
elif self.reduction == LossReduction.SUM.value:
f = torch.sum(f)
elif self.reduction == LossReduction.NONE.value:
pass
else:
raise ValueError(
f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].'
)
return f
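# Minimal usage sketch (shapes and values hypothetical):
#   loss = GeneralizedDiceLoss(sigmoid=True)
#   value = loss(torch.rand(2, 3, 8, 8), torch.rand(2, 3, 8, 8))  # scalar mean loss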
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import collections
from typing import Optional
from typing import Union
from typing import Callable
from typing import Any
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = tl.full([1], 1, tl.int32)
tmp3 = tmp2 / tmp1
tmp4 = libdevice.isinf(tmp3).to(tl.int1)
tmp5 = 0.0
tmp6 = tl.where(tmp4, tmp5, tmp3)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_reciprocal_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp4 * tmp4
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp6 / tmp5
tmp8 = tl.where(tmp2, tmp3, tmp7)
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused_index_put_max_3(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp0 = tl.full([1, 1], 0, tl.int32)
tmp1 = tmp0 == tmp0
tmp4 = tmp3 * tmp3
tmp5 = tl.full([1, 1], 1, tl.int32)
tmp6 = tmp5 / tmp4
tmp7 = tl.where(tmp1, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = triton_helpers.max2(tmp8, 1)[:, None]
tmp11 = libdevice.isinf(tmp6).to(tl.int1)
tmp12 = tl.where(tmp11, tmp10, tmp7)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_5(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp0 = tl.full([1], 1, tl.int32)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (4 + x0), tmp8, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_index_put_max_7(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (4 + r0), None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr1 + (4 + r0), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + tl.broadcast_to(4 + r0, [XBLOCK, RBLOCK]), tmp13, None)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_8(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp0 = tl.full([1], 2, tl.int32)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (8 + x0), tmp8, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_index_put_max_10(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (8 + r0), None)
tmp9 = tl.load(in_ptr1 + (4 + r0), None)
tmp10 = tl.load(in_ptr1 + (8 + r0), None)
tmp0 = tl.full([1, 1], 2, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 1, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + tl.broadcast_to(8 + r0, [XBLOCK, RBLOCK]), tmp13, None)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_11(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp0 = tl.full([1], 3, tl.int32)
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (12 + x0), tmp8, xmask)
@triton.jit
def triton_poi_fused_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_index_put_max_13(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (12 + r0), None)
tmp9 = tl.load(in_ptr1 + (8 + r0), None)
tmp10 = tl.load(in_ptr1 + (12 + r0), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 2, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK]), tmp13, None
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_14(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 12)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 13)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp12 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + 14)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + 15)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp26 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = r0
tmp2 = tl.full([1, 1], 3, tl.int32)
tmp3 = tmp1 == tmp2
tmp7 = tl.where(tmp3, tmp5, tmp6)
tmp8 = tmp0 * tmp7
tmp13 = tl.where(tmp3, tmp11, tmp12)
tmp14 = tmp9 * tmp13
tmp15 = tmp8 + tmp14
tmp20 = tl.where(tmp3, tmp18, tmp19)
tmp21 = tmp16 * tmp20
tmp22 = tmp15 + tmp21
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tmp23 * tmp27
tmp29 = tmp22 + tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp32 * tmp7
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp13
tmp38 = tmp33 + tmp37
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp20
tmp43 = tmp38 + tmp42
tmp46 = tmp44 + tmp45
tmp47 = tmp46 * tmp27
tmp48 = tmp43 + tmp47
tmp49 = 2.0
tmp50 = tmp29 * tmp49
tmp51 = 1e-05
tmp52 = tmp50 + tmp51
tmp53 = tmp48 + tmp51
tmp54 = tmp52 / tmp53
tmp55 = 1.0
tmp56 = tmp55 - tmp54
tmp57 = tl.broadcast_to(tmp56, [XBLOCK, RBLOCK])
tmp59 = tl.sum(tmp57, 1)[:, None]
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp61, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(16)](arg1_1, arg0_1, buf0, buf1,
buf29, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_index_put_lift_fresh_1[grid(4)](buf1, buf2, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_reciprocal_2[grid(16)](buf2, buf1, buf4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
triton_per_fused_index_put_max_3[grid(1)](buf2, buf1, buf4, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
del buf2
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_4[grid(16)](buf4, buf7, 16, XBLOCK=16, num_warps=1,
num_stages=1)
triton_poi_fused_index_put_lift_fresh_5[grid(4)](buf4, buf7, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_6[grid(16)](buf7, buf11, 16, XBLOCK=16, num_warps=
1, num_stages=1)
triton_per_fused_index_put_max_7[grid(1)](buf7, buf4, buf11, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
buf14 = buf7
del buf7
triton_poi_fused_6[grid(16)](buf11, buf14, 16, XBLOCK=16, num_warps
=1, num_stages=1)
triton_poi_fused_index_put_lift_fresh_8[grid(4)](buf11, buf14, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf18 = buf4
del buf4
triton_poi_fused_9[grid(16)](buf14, buf18, 16, XBLOCK=16, num_warps
=1, num_stages=1)
triton_per_fused_index_put_max_10[grid(1)](buf14, buf11, buf18, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
buf21 = buf14
del buf14
triton_poi_fused_9[grid(16)](buf18, buf21, 16, XBLOCK=16, num_warps
=1, num_stages=1)
triton_poi_fused_index_put_lift_fresh_11[grid(4)](buf18, buf21, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf25 = buf11
del buf11
triton_poi_fused_12[grid(16)](buf21, buf25, 16, XBLOCK=16,
num_warps=1, num_stages=1)
triton_per_fused_index_put_max_13[grid(1)](buf21, buf18, buf25, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
del buf18
del buf21
buf31 = empty_strided_cuda((), (), torch.float32)
buf32 = buf31
del buf31
triton_per_fused_add_div_mean_mul_rsub_sum_14[grid(1)](buf32, buf0,
buf25, buf1, buf29, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf25
del buf29
return buf32,
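# Usage sketch (CUDA required; the guards above pin both inputs to shape (4, 4, 4, 4)):
#   pred = torch.rand(4, 4, 4, 4, device='cuda')
#   target = torch.rand(4, 4, 4, 4, device='cuda')
#   loss, = call([pred, target])  # scalar mean generalized Dice loss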
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
    return isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any, ...]:
    """
    Returns a copy of `tup` with `dim` values, either shortened or padded with `pad_val` as necessary.
    """
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
def one_hot(labels: 'torch.Tensor', num_classes: 'int',
    dtype: 'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
for `num_classes` N number of classes.
Example:
For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndimension() < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
    assert sh[dim] == 1, 'labels should have a channel with length equal to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class Weight(Enum):
"""
See also: :py:class:`monai.losses.dice.GeneralizedDiceLoss`
"""
SQUARE = 'square'
SIMPLE = 'simple'
UNIFORM = 'uniform'
class GeneralizedDiceLossNew(_Loss):
"""
Compute the generalised Dice loss defined in:
Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017.
Adapted from:
https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
"""
    def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'=False,
            sigmoid: 'bool'=False, softmax: 'bool'=False,
            other_act: 'Optional[Callable]'=None,
            w_type: 'Union[Weight, str]'=Weight.SQUARE,
            reduction: 'Union[LossReduction, str]'=LossReduction.MEAN,
            smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05,
            batch: 'bool'=False) ->None:
"""
Args:
            include_background: If False, channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
            sigmoid: If True, apply a sigmoid function to the prediction.
            softmax: If True, apply a softmax function to the prediction.
            other_act: a callable to apply another activation layer when neither `sigmoid` nor `softmax`
                is wanted. Defaults to ``None``. For example:
                `other_act = torch.tanh`.
w_type: {``"square"``, ``"simple"``, ``"uniform"``}
Type of function to transform ground truth volume to a weight factor. Defaults to ``"square"``.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, intersection over union is computed from each item in the batch.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
w_type = Weight(w_type)
self.w_func: 'Callable' = torch.ones_like
if w_type == Weight.SIMPLE:
self.w_func = torch.reciprocal
elif w_type == Weight.SQUARE:
self.w_func = lambda x: torch.reciprocal(x * x)
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
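def _demo_weight_functions():
    # Hedged sketch, not part of the original source: the three w_type choices
    # turn a per-class ground-truth volume g into a weight factor, as in
    # Sudre et al.: "square" -> 1/g^2 (the default), "simple" -> 1/g,
    # "uniform" -> 1, so rarer classes receive larger weights.
    g = torch.tensor([1.0, 10.0, 100.0])
    assert torch.allclose(torch.reciprocal(g * g), torch.tensor([1.0, 0.01, 0.0001]))
    assert torch.allclose(torch.reciprocal(g), torch.tensor([1.0, 0.1, 0.01]))
    assert torch.equal(torch.ones_like(g), torch.tensor([1.0, 1.0, 1.0]))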
| Irme/MONAI | GeneralizedDiceLoss | false | 17,472 | [
"Apache-2.0"
] | 3 | dc4bf661831b14f4231cb325cc1b15d38c1e406c | https://github.com/Irme/MONAI/tree/dc4bf661831b14f4231cb325cc1b15d38c1e406c |
MedianPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/h6/ch66a7fv4ibs6uzgjzlpbnvvgycnijp2555725lecsqnqdvnwhku.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%unfold_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3) % 3
x2 = (xindex // 9) % 4
x3 = (xindex // 36) % 4
x4 = (xindex // 144)
x5 = xindex
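    # The nested tl_math.abs terms implement F.pad's reflect padding in index
    # arithmetic: an offset c = x0 + x2 - 1 in [-1, 4] is folded back to
    # 3 - |3 - |c||, which mirrors out-of-range positions into the 4x4 map.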
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0 + x2))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1 + x3))))) + (16*x4)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 3, 3), (576, 144, 36, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 2304, grid=grid(2304), stream=stream0)
del arg0_1
# Topologically Sorted Source Nodes: [median], Original ATen: [aten.median]
buf1 = torch.ops.aten.median.dim(reinterpret_tensor(buf0, (4, 4, 4, 4, 9), (576, 144, 36, 9, 1), 0), -1)
del buf0
buf2 = buf1[0]
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _quadruple
class MedianPool2d(nn.Module):
"""Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=True):
"""Initialize with kernel_size, stride, padding."""
super().__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding)
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - ih % self.stride[0], 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - iw % self.stride[1], 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = pl, pr, pt, pb
else:
padding = self.padding
return padding
def forward(self, x):
x = F.pad(x, self._padding(x), mode='reflect')
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1],
self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
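def _demo_median_pool():
    # Hedged usage sketch, not part of the original source: with same=True the
    # output keeps the spatial size, and a single salt-noise pixel inside a
    # constant region is removed by the 3x3 median filter.
    x = torch.zeros(1, 1, 4, 4)
    x[0, 0, 2, 2] = 100.0
    y = MedianPool2d(kernel_size=3, stride=1, same=True)(x)
    assert y.shape == (1, 1, 4, 4)
    assert y[0, 0, 2, 2] == 0.0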
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _quadruple
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 3
x2 = xindex // 9 % 4
x3 = xindex // 36 % 4
x4 = xindex // 144
x5 = xindex
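    # Nested abs() folds out-of-range offsets back into [0, 3], reproducing
    # the reflect padding that F.pad applied in the eager module.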
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0 + x2)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1 + x3)) + 16 *
x4), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x5, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 3, 3), (576, 144, 36, 9, 3,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(2304)](arg0_1, buf0, 2304, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = torch.ops.aten.median.dim(reinterpret_tensor(buf0, (4, 4, 4,
4, 9), (576, 144, 36, 9, 1), 0), -1)
del buf0
buf2 = buf1[0]
del buf1
return buf2,
class MedianPool2dNew(nn.Module):
"""Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=True):
"""Initialize with kernel_size, stride, padding."""
super().__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding)
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - ih % self.stride[0], 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - iw % self.stride[1], 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = pl, pr, pt, pb
else:
padding = self.padding
return padding
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Jiaqi0602/adversarial-attack-from-leakage | MedianPool2d | false | 17,473 | [
"BSD-3-Clause"
] | 9 | 90db721bed10094ac7d458b232ad5b1573884338 | https://github.com/Jiaqi0602/adversarial-attack-from-leakage/tree/90db721bed10094ac7d458b232ad5b1573884338 |
SlideNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/cx/ccxx7ogetsg5xueniw76nfwj5k7icpgxq2up6h7z7ykwrncdxbj5.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 139240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3481) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/o7/co7ls5kqvghq3jdq5zcjmuo3qappfgmqrbllrrjk6ktqptzazlav.py
# Topologically Sorted Source Nodes: [max_pool2d, out], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1
# out => relu
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 33640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 29
x1 = (xindex // 29) % 29
x4 = (xindex // 841)
x3 = (xindex // 8410)
x5 = xindex % 8410
x6 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (118*x1) + (3481*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (118*x1) + (3481*x4)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (59 + (2*x0) + (118*x1) + (3481*x4)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (60 + (2*x0) + (118*x1) + (3481*x4)), xmask, eviction_policy='evict_last')
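    # tmp2..tmp15 track the argmax (int8 codes 0..3) of each 2x2 window for
    # the pooling indices, tmp6/tmp11/tmp16 the running max, and tmp18 fuses
    # the ReLU as max(0, max-value).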
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x5 + (8448*x3)), tmp15, xmask)
tl.store(out_ptr1 + (x6), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/5l/c5l7ltf344otnjpfyaapq4qwqey4ylylng5iqyy7djd4lehvdg53.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 54080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 676) % 20
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/hl/chltjbp3rnxoqp5o76byg2epbh4ocgaan53kmzid6lr5nk7vwfqh.py
# Topologically Sorted Source Nodes: [max_pool2d_1, out_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# out_1 => relu_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 13520
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 13
x4 = (xindex // 13)
x2 = (xindex // 3380)
x3 = xindex % 3380
tmp0 = tl.load(in_ptr0 + ((2*x0) + (52*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (52*x4)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (26 + (2*x0) + (52*x4)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (27 + (2*x0) + (52*x4)), xmask, eviction_policy='evict_last')
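    # Same 2x2 argmax/max scheme as the first pooling kernel; tmp20 (computed
    # below) additionally stores the ReLU mask (output <= 0) consumed by the
    # autograd threshold_backward pass.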
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + (x3 + (3456*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x3 + (3392*x2)), tmp18, xmask)
tl.store(out_ptr2 + (x3 + (3456*x2)), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/xd/cxdcrcgytik6vvilb4a4snbnj2k27uyl5mifs4jfk7mmfh7rmw5k.py
# Topologically Sorted Source Nodes: [max_pool2d_1, out_1, out_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.view]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1
# out_1 => relu_1
# out_2 => view
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [-1, 80]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_view_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_view_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_view_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 13520
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((3392*(x0 // 3380)) + (x0 % 3380)), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/7h/c7h5654sbj3tz2yxhfkd7tqafczrfvsxn47vvv4yxfk7d32lve6r.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_3 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 5408
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/r2/cr2vlnbn37rn7s42pd3fijrsi3osnwx3uoqyjfnptc63nggaq2gy.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (10, 1, 6, 6), (36, 36, 6, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 4, 4), (160, 16, 4, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (32, 80), (80, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (16, 32), (32, 1))
assert_size_stride(primals_9, (16, ), (1, ))
assert_size_stride(primals_10, (1, 16), (16, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 59, 59), (34810, 3481, 59, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 139240, grid=grid(139240), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 10, 29, 29), (8448, 841, 29, 1), torch.int8)
buf3 = empty_strided_cuda((4, 10, 29, 29), (8410, 841, 29, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d, out], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
triton_poi_fused_max_pool2d_with_indices_relu_1.run(buf1, buf2, buf3, 33640, grid=grid(33640), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 26, 26), (13520, 676, 26, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 54080, grid=grid(54080), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1), torch.int8)
buf7 = empty_strided_cuda((4, 20, 13, 13), (3392, 169, 13, 1), torch.float32)
buf15 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1), torch.bool)
# Topologically Sorted Source Nodes: [max_pool2d_1, out_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3.run(buf5, buf6, buf7, buf15, 13520, grid=grid(13520), stream=stream0)
buf8 = empty_strided_cuda((169, 80), (80, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d_1, out_1, out_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.view]
triton_poi_fused_max_pool2d_with_indices_relu_view_4.run(buf7, buf8, 13520, grid=grid(13520), stream=stream0)
del buf7
buf9 = empty_strided_cuda((169, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (80, 32), (1, 80), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf10, primals_7, 5408, grid=grid(5408), stream=stream0)
del primals_7
buf11 = empty_strided_cuda((169, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (32, 16), (1, 32), 0), out=buf11)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf12, primals_9, 2704, grid=grid(2704), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((169, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf12, reinterpret_tensor(primals_10, (16, 1), (1, 16), 0), alpha=1, beta=1, out=buf14)
del primals_11
return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, buf8, buf10, buf12, primals_10, primals_8, primals_6, buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 1, 6, 6), (36, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 10, 4, 4), (160, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 80), (80, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class SlideNet(nn.Module):
"""
    Sliding window network
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=6)
self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size=4)
self.fc1 = nn.Linear(in_features=80, out_features=32)
self.fc2 = nn.Linear(in_features=32, out_features=16)
self.fc3 = nn.Linear(in_features=16, out_features=1)
def forward(self, x):
out = F.relu(F.max_pool2d(self.conv1(x), kernel_size=2))
out = F.relu(F.max_pool2d(self.conv2(out), kernel_size=2))
out = out.view(-1, 80)
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
return self.fc3(out)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
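def _demo_slidenet_shapes():
    # Hedged shape trace, not part of the original source, matching the sizes
    # the compiled kernels assert: 64x64 -> conv k=6 -> 59x59 -> pool/2 ->
    # 29x29 -> conv k=4 -> 26x26 -> pool/2 -> 13x13, so 20*13*13 = 3380
    # features per sample; view(-1, 80) then yields 4*3380/80 = 169 rows for
    # a batch of 4, and fc3 maps each row to a single score.
    out = SlideNet()(torch.rand(4, 1, 64, 64))
    assert out.shape == (169, 1)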
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 139240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3481 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 33640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 29
x1 = xindex // 29 % 29
x4 = xindex // 841
x3 = xindex // 8410
x5 = xindex % 8410
x6 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 118 * x1 + 3481 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 118 * x1 + 3481 * x4), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (59 + 2 * x0 + 118 * x1 + 3481 * x4), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (60 + 2 * x0 + 118 * x1 + 3481 * x4), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x5 + 8448 * x3), tmp15, xmask)
tl.store(out_ptr1 + x6, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 54080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 13520
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 13
x4 = xindex // 13
x2 = xindex // 3380
x3 = xindex % 3380
tmp0 = tl.load(in_ptr0 + (2 * x0 + 52 * x4), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 52 * x4), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (26 + 2 * x0 + 52 * x4), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (27 + 2 * x0 + 52 * x4), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + (x3 + 3456 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x3 + 3392 * x2), tmp18, xmask)
tl.store(out_ptr2 + (x3 + 3456 * x2), tmp20, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_view_4(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 13520
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3392 * (x0 // 3380) + x0 % 3380), xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 5408
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 2704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (10, 1, 6, 6), (36, 36, 6, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 4, 4), (160, 16, 4, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (32, 80), (80, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (16, 32), (32, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (1, 16), (16, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 59, 59), (34810, 3481, 59, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(139240)](buf1, primals_2,
139240, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 10, 29, 29), (8448, 841, 29, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 10, 29, 29), (8410, 841, 29, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(33640)](buf1,
buf2, buf3, 33640, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 26, 26), (13520, 676, 26, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(54080)](buf5, primals_5, 54080,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 20, 13, 13), (3392, 169, 13, 1),
torch.float32)
buf15 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1),
torch.bool)
        triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[
            grid(13520)](buf5, buf6, buf7, buf15, 13520, XBLOCK=128,
            num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((169, 80), (80, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_view_4[grid(13520)](buf7,
buf8, 13520, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((169, 32), (32, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (80, 32), (1,
80), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_5[grid(5408)](buf10, primals_7, 5408, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf11 = empty_strided_cuda((169, 16), (16, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (32, 16), (1,
32), 0), out=buf11)
buf12 = buf11
del buf11
triton_poi_fused_relu_6[grid(2704)](buf12, primals_9, 2704, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((169, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, buf12, reinterpret_tensor(
primals_10, (16, 1), (1, 16), 0), alpha=1, beta=1, out=buf14)
del primals_11
return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, buf8, buf10, buf12, primals_10, primals_8, primals_6, buf15)
class SlideNetNew(nn.Module):
"""
    Sliding window network
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=6)
self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size=4)
self.fc1 = nn.Linear(in_features=80, out_features=32)
self.fc2 = nn.Linear(in_features=32, out_features=16)
self.fc3 = nn.Linear(in_features=16, out_features=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| JHorcasitas/cnn_document_binarization | SlideNet | false | 17,474 | [
"MIT"
] | 9 | 075f76aed375ca14a53011f4dfeb12379debb5b3 | https://github.com/JHorcasitas/cnn_document_binarization/tree/075f76aed375ca14a53011f4dfeb12379debb5b3 |
ActionScoring | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/mg/cmgidiawtgj4j4yvfspjfgphubyvg6afkdsr6ts4xhxphuj227lu.py
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# product => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 16384
x0 = xindex % 4096
x2 = (xindex // 16384)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 4), (4, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [context], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4, 256), (16384, 4096, 1024, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf1, buf0, buf2, 65536, grid=grid(65536), stream=stream0)
buf4 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 256), (256, 1), 0), reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf4)
del primals_8
return (reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (256, 256), (256, 1), 0), primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ActionScoring(nn.Module):
""" Linearly mapping h and v to the same dimension,
and do a elementwise multiplication and a linear scoring. """
def __init__(self, action_size, hidden_size, dot_size: 'int'=256):
super(ActionScoring, self).__init__()
self.linear_act = nn.Linear(action_size, dot_size, bias=True)
self.linear_hid = nn.Linear(hidden_size, dot_size, bias=True)
self.linear_out = nn.Linear(dot_size, 1, bias=True)
def forward(self, act_cands, h_tilde):
""" Compute logits of action candidates
:param act_cands: torch.Tensor(batch, num_candidates, action_emb_size)
:param h_tilde: torch.Tensor(batch, hidden_size)
:return: torch.Tensor(batch, num_candidates)
"""
target = self.linear_hid(h_tilde).unsqueeze(1)
context = self.linear_act(act_cands)
product = torch.mul(context, target)
logits = self.linear_out(product).squeeze(2)
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'action_size': 4, 'hidden_size': 4}]
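# --- Added illustration (not from the original repo): a minimal shape check of
# the eager module above. The tensor sizes are assumptions chosen for the demo.
def _demo_action_scoring():
    batch, num_cands, act_size, hid_size = 2, 5, 4, 4
    scorer = ActionScoring(action_size=act_size, hidden_size=hid_size)
    act_cands = torch.rand(batch, num_cands, act_size)
    h_tilde = torch.rand(batch, hid_size)
    logits = scorer(act_cands, h_tilde)
    assert logits.shape == (batch, num_cands)  # one logit per candidate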
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
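# Note (added commentary, not in the original dump): the kernel below writes
# the flattened (4, 4, 4, 4, 256) product. `x3 = xindex % 16384` re-reads the
# full 16384-element first input (in_ptr0) for every outer index, while
# `x0 + 4096 * x2` re-reads each 4096-element slice of the second input
# (in_ptr1) four times per outer index -- together these index patterns
# realize the broadcast of torch.mul(context, target) without materializing
# expanded tensors.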
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 16384
x0 = xindex % 4096
x2 = xindex // 16384
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 4), (4, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 256), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4, 256), (16384, 4096, 1024,
256, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(65536)](buf1, buf0, buf2, 65536, XBLOCK
=256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 256),
(256, 1), 0), reinterpret_tensor(primals_7, (256, 1), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_8
return reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (256, 256), (256, 1), 0), primals_7
class ActionScoringNew(nn.Module):
""" Linearly mapping h and v to the same dimension,
and do a elementwise multiplication and a linear scoring. """
def __init__(self, action_size, hidden_size, dot_size: 'int'=256):
super(ActionScoringNew, self).__init__()
self.linear_act = nn.Linear(action_size, dot_size, bias=True)
self.linear_hid = nn.Linear(hidden_size, dot_size, bias=True)
self.linear_out = nn.Linear(dot_size, 1, bias=True)
def forward(self, input_0, input_1):
primals_1 = self.linear_act.weight
primals_2 = self.linear_act.bias
primals_4 = self.linear_hid.weight
primals_5 = self.linear_hid.bias
primals_7 = self.linear_out.weight
primals_8 = self.linear_out.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| IMNearth/Curriculum-Learning-For-VLN | ActionScoring | false | 17,475 | ["MIT"] | 8 | d2fe1286eb295dc8c63a0c886b35883f32481d85 | https://github.com/IMNearth/Curriculum-Learning-For-VLN/tree/d2fe1286eb295dc8c63a0c886b35883f32481d85 |
GraphConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/4g/c4guhk7x6skkidedvs2gxz2kcu6gb76l3ig5crjjvjtzvnjlhlte.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_4, buf4, 256, grid=grid(256), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pred_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_5, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
from torch.nn import init
import torch.nn.functional as F
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, act=nn.ReLU(),
normalize_input=True):
super(MLP, self).__init__()
self.linear_1 = nn.Linear(input_dim, hidden_dim)
self.linear_2 = nn.Linear(hidden_dim, output_dim)
self.act = act
self.normalize_input = normalize_input
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data = init.xavier_uniform_(m.weight.data, gain=nn
.init.calculate_gain('relu'))
if m.bias is not None:
m.bias.data = init.constant_(m.bias.data, 0.0)
def forward(self, x):
if self.normalize_input:
x = (x - torch.mean(x, dim=0)) / torch.std(x, dim=0)
x = self.act(self.linear_1(x))
return self.linear_2(x)
class GraphConv(nn.Module):
def __init__(self, input_dim, output_dim, hidden_dim,
normalize_embedding=False, normalize_embedding_l2=False, att=False,
mpnn=False, graphsage=False):
super(GraphConv, self).__init__()
self.normalize_embedding = normalize_embedding
self.normalize_embedding_l2 = normalize_embedding_l2
self.input_dim = input_dim
self.output_dim = output_dim
self.att = att
self.mpnn = mpnn
self.graphsage = graphsage
if self.graphsage:
self.out_compute = MLP(input_dim=input_dim * 2, hidden_dim=
hidden_dim, output_dim=output_dim, act=nn.ReLU(),
normalize_input=False)
elif self.mpnn:
self.out_compute = MLP(input_dim=hidden_dim, hidden_dim=
hidden_dim, output_dim=output_dim, act=nn.ReLU(),
normalize_input=False)
else:
self.out_compute = MLP(input_dim=input_dim, hidden_dim=
hidden_dim, output_dim=output_dim, act=nn.ReLU(),
normalize_input=False)
if self.att:
self.att_compute = MLP(input_dim=input_dim, hidden_dim=
hidden_dim, output_dim=output_dim, act=nn.LeakyReLU(0.2),
normalize_input=False)
if self.mpnn:
self.mpnn_compute = MLP(input_dim=input_dim * 2, hidden_dim=
hidden_dim, output_dim=hidden_dim, act=nn.ReLU(),
normalize_input=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, adj):
if self.att:
x_att = self.att_compute(x)
att = x_att @ x_att.permute(1, 0)
att = self.softmax(att)
pred = torch.matmul(adj * att, x)
elif self.mpnn:
x1 = x.unsqueeze(0).repeat(x.shape[0], 1, 1)
x2 = x.unsqueeze(1).repeat(1, x.shape[0], 1)
e = torch.cat((x1, x2), dim=-1)
e = self.mpnn_compute(e)
pred = torch.mean(adj.unsqueeze(-1) * e, dim=1)
else:
pred = torch.matmul(adj, x)
if self.graphsage:
pred = torch.cat((pred, x), dim=-1)
pred = self.out_compute(pred)
if self.normalize_embedding:
pred = (pred - torch.mean(pred, dim=0)) / torch.std(pred, dim=0)
if self.normalize_embedding_l2:
pred = F.normalize(pred, p=2, dim=-1)
return pred
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'hidden_dim': 4}]
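# --- Added illustration (not part of the original source): exercises the
# default path (att=False, mpnn=False, graphsage=False), which is the one the
# compiled graph above covers: pred = matmul(adj, x) followed by the MLP.
def _demo_graph_conv():
    conv = GraphConv(input_dim=4, output_dim=4, hidden_dim=4)
    x, adj = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    out = conv(x, adj)
    assert out.shape == (4, 4, 4, 4)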
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
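# Note (added commentary): this kernel fuses the bias add and ReLU of the
# MLP's first linear layer. It writes the activation in place (in_out_ptr0)
# and also emits the boolean mask `relu(x) <= 0` (out_ptr0), which is exactly
# what aten.threshold_backward consumes in the backward pass.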
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0
), out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2,
primals_4, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_6
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(
buf2, (64, 4), (4, 1), 0), primals_5, buf4
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, act=nn.ReLU(),
normalize_input=True):
super(MLP, self).__init__()
self.linear_1 = nn.Linear(input_dim, hidden_dim)
self.linear_2 = nn.Linear(hidden_dim, output_dim)
self.act = act
self.normalize_input = normalize_input
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data = init.xavier_uniform_(m.weight.data, gain=nn
.init.calculate_gain('relu'))
if m.bias is not None:
m.bias.data = init.constant_(m.bias.data, 0.0)
def forward(self, x):
if self.normalize_input:
x = (x - torch.mean(x, dim=0)) / torch.std(x, dim=0)
x = self.act(self.linear_1(x))
return self.linear_2(x)
class GraphConvNew(nn.Module):
def __init__(self, input_dim, output_dim, hidden_dim,
normalize_embedding=False, normalize_embedding_l2=False, att=False,
mpnn=False, graphsage=False):
super(GraphConvNew, self).__init__()
self.normalize_embedding = normalize_embedding
self.normalize_embedding_l2 = normalize_embedding_l2
self.input_dim = input_dim
self.output_dim = output_dim
self.att = att
self.mpnn = mpnn
self.graphsage = graphsage
if self.graphsage:
self.out_compute = MLP(input_dim=input_dim * 2, hidden_dim=
hidden_dim, output_dim=output_dim, act=nn.ReLU(),
normalize_input=False)
elif self.mpnn:
self.out_compute = MLP(input_dim=hidden_dim, hidden_dim=
hidden_dim, output_dim=output_dim, act=nn.ReLU(),
normalize_input=False)
else:
self.out_compute = MLP(input_dim=input_dim, hidden_dim=
hidden_dim, output_dim=output_dim, act=nn.ReLU(),
normalize_input=False)
if self.att:
self.att_compute = MLP(input_dim=input_dim, hidden_dim=
hidden_dim, output_dim=output_dim, act=nn.LeakyReLU(0.2),
normalize_input=False)
if self.mpnn:
self.mpnn_compute = MLP(input_dim=input_dim * 2, hidden_dim=
hidden_dim, output_dim=hidden_dim, act=nn.ReLU(),
normalize_input=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0, input_1):
primals_3 = self.out_compute.linear_1.weight
primals_4 = self.out_compute.linear_1.bias
primals_5 = self.out_compute.linear_2.weight
primals_6 = self.out_compute.linear_2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| JiaxuanYou/graph-pooling | GraphConv | false | 17,476 | ["MIT"] | 5 | e6237f03a72ac55d8a10192ca36fa596973461f5 | https://github.com/JiaxuanYou/graph-pooling/tree/e6237f03a72ac55d8a10192ca36fa596973461f5 |
SALayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/g4/cg4ktrykrbz7pvagnx6ibqvgol6i2m6cad224574tjik5bwhlbin.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# a => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %getitem], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/lz/clz3tpmlugy7uw5auhl6n6n2mmbfdswasrmjrgdr5swueyrperkb.py
# Topologically Sorted Source Nodes: [a_2, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# a_2 => sigmoid
# mul => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_1), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x3), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
# Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a_2, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(buf1, primals_1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_1, primals_2, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 7, 7), (98, 49, 7, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class SALayer(nn.Module):
def __init__(self, kernel_size=7):
super(SALayer, self).__init__()
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
a = torch.cat([avg_out, max_out], dim=1)
a = self.conv1(a)
a = self.sigmoid(a)
return a * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
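# --- Added illustration (not from the original repo): SALayer is a CBAM-style
# spatial attention gate -- channel-wise mean and max maps are concatenated,
# convolved down to one channel, and squashed by a sigmoid before gating x.
def _demo_sa_layer():
    sa = SALayer(kernel_size=7)
    x = torch.rand(4, 4, 4, 4)  # (N, C, H, W)
    out = sa(x)
    assert out.shape == x.shape  # the sigmoid gate preserves the input shape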
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
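# Note (added commentary): triton_poi_fused_cat_0 builds both pooled maps in a
# single pass instead of separate mean/max kernels. For output channel x1 == 0
# it averages the four input channels (tmp5..tmp13); for x1 == 1 it takes
# their running maximum (tmp19..tmp25); tl.where(tmp4, ...) then selects the
# right branch, fusing torch.cat([avg_out, max_out], dim=1) into one kernel.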
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
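# Note (added commentary): the kernel below applies the sigmoid to the
# single-channel attention map and multiplies it back onto the input; the read
# at `x0 + 16 * x2` reuses the same 16-pixel map for all four input channels
# of a batch element, i.e. the broadcast in `a * x`.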
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr1 + x3, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, primals_1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, buf0, buf1
class SALayerNew(nn.Module):
def __init__(self, kernel_size=7):
super(SALayerNew, self).__init__()
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| JiahangGu/RFN | SALayer | false | 17,477 | ["MIT"] | 4 | 8f7b33e22bb0a9f4057476720e05cc694a46ec00 | https://github.com/JiahangGu/RFN/tree/8f7b33e22bb0a9f4057476720e05cc694a46ec00 |
NN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/nx/cnxkayabsarjngkjglfymnwspz7yhjb2tbkhamgf6xviihv2u2ho.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (3, 16), (16, 1))
assert_size_stride(primals_5, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 1024, grid=grid(1024), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 3), (1, 16), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((3, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class NN(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(4, 16)
self.fc2 = nn.Linear(16, 3)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
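# --- Added illustration (not part of the original source): the 4 -> 16 -> 3
# MLP above returns raw logits; a softmax or argmax would typically follow.
def _demo_nn():
    model = NN()
    logits = model(torch.rand(8, 4))
    assert logits.shape == (8, 3)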
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (3, 16), (16, 1))
assert_size_stride(primals_5, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
primals_2, buf3, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 3), (1, 16), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 3), (48, 12, 3, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3
class NNNew(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(4, 16)
self.fc2 = nn.Linear(16, 3)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Jie-Yuan/Deeps | NN | false | 17,478 | ["MIT"] | 4 | b4acbb8e16b8ff5d181e70c3b549df0d818d0d76 | https://github.com/Jie-Yuan/Deeps/tree/b4acbb8e16b8ff5d181e70c3b549df0d818d0d76 |
WeightedCrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/go/cgobu5sa2tyxtk6mrlov2ofgra2mvlz7hlazhico2xqgi24yq2dl.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, clone, sub
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vh/cvhukx6jny3lzngngzgomrj7cky7jbseunfw5z3n4zit7ppv5ftk.py
# Topologically Sorted Source Nodes: [target, cross_entropy], Original ATen: [aten.argmax, aten.nll_loss2d_forward]
# Source node to ATen node mapping:
# cross_entropy => full_default_1, ne_1, neg, where_1
# target => argmax
# Graph fragment:
# %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%arg1_1, -1), kwargs = {})
# %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%view_1, -100), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_1), kwargs = {})
triton_poi_fused_argmax_nll_loss2d_forward_1 = async_compile.triton('triton_poi_fused_argmax_nll_loss2d_forward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_nll_loss2d_forward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_argmax_nll_loss2d_forward_1(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp61 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert(((0 <= tmp53) & (tmp53 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp53 < 4")
tmp55 = tl.load(in_ptr1 + (tmp53 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tl.store(out_ptr1 + (x0), tmp71, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/im/cimj3zcmmwkch43geltms4rxmgsxa52vmgqjukcerfaf5c3rob2a.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# loss => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %arg2_1), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf1, 64, grid=grid(64), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [target, cross_entropy], Original ATen: [aten.argmax, aten.nll_loss2d_forward]
triton_poi_fused_argmax_nll_loss2d_forward_1.run(arg1_1, buf1, buf2, 16, grid=grid(16), stream=stream0)
del arg1_1
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf2, arg2_1, buf3, 64, grid=grid(64), stream=stream0)
del arg2_1
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform the input to fit the format of the PyTorch official cross entropy
loss, with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor',
weights: 'torch.Tensor'):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
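A minimal usage sketch (our addition, not from the source repo; assumes the
class above is in scope, and all names are illustrative): it runs the eager
module on get_inputs()-style shapes and cross-checks the result against an
explicit log-softmax gather.
import torch
import torch.nn.functional as F
loss_fn = WeightedCrossEntropyLoss()
logits = torch.rand(4, 4, 4)    # (B, #anchors, #classes)
targets = torch.rand(4, 4, 4)   # argmax over the last dim picks the class
weights = torch.rand(4, 4)      # (B, #anchors)
loss = loss_fn(logits, targets, weights)  # (B, #anchors)
# Reference: -log_softmax at the argmax class, scaled by the anchor weight.
ref = -F.log_softmax(logits, dim=-1).gather(
    -1, targets.argmax(-1, keepdim=True)).squeeze(-1) * weights
assert torch.allclose(loss, ref, atol=1e-6)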
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_argmax_nll_loss2d_forward_1(in_ptr0, in_ptr1, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp58 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp61 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp64 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
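    # NaN-aware running argmax over the four class scores: the incumbent wins
    # if it is strictly greater, if it is NaN while the challenger is not
    # (NaN sorts as largest, matching torch.argmax), or on ties, because it
    # holds the smaller index.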
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
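    # nll_loss ignore_index handling: targets equal to -100 are masked out;
    # their index is replaced by 0 so the gather below stays in bounds, and
    # their contribution is zeroed at the end (tmp71).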
tmp47 = tl.full([1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4) | ~xmask,
'index out of bounds: 0 <= tmp53 < 4')
    tmp55 = tl.load(in_ptr1 + (tmp53 + 4 * x0), xmask, eviction_policy='evict_last')
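    # in_ptr1 (buf1) holds logits shifted by the row max; summing exp() and
    # taking log completes a numerically stable log-softmax, and negating the
    # gathered entry yields the per-anchor NLL.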
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tl.store(out_ptr1 + x0, tmp71, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(64)](arg0_1, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_argmax_nll_loss2d_forward_1[grid(16)](arg1_1, buf1,
buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_mul_2[grid(64)](buf2, arg2_1, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg2_1
del buf2
return buf3,
class WeightedCrossEntropyLossNew(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
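A hedged smoke test (our addition; assumes a CUDA device and that the eager
WeightedCrossEntropyLoss from above is also in scope): the Inductor-generated
wrapper should reproduce the eager loss on the exact contiguous (4, 4, 4)
inputs that call() asserts.
import torch
inputs = [torch.rand(4, 4, 4, device='cuda') for _ in range(3)]
expected = WeightedCrossEntropyLoss()(*inputs)
actual = WeightedCrossEntropyLossNew()(*inputs)
assert torch.allclose(expected, actual, atol=1e-5)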
| Jiaolong/trajectory-prediction | WeightedCrossEntropyLoss | false | 17,479 | [
"Apache-2.0"
] | 6 | 3fd4e6253b44dfdc86e7c08e93c002baf66f2e46 | https://github.com/Jiaolong/trajectory-prediction/tree/3fd4e6253b44dfdc86e7c08e93c002baf66f2e46 |
SRB | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/nd/cndrb2by2g3ednlxsjtytxjhjdm7bdlrgvzzodhsxvxnpml2di7p.py
# Topologically Sorted Source Nodes: [conv2d, o1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# o1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/yf/cyf4onw64kisylvpovi56es47z2xblpjc7lad7245qvttvjjn43d.py
# Topologically Sorted Source Nodes: [conv2d_1, o2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# o2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/6l/c6lm5bqq6wlfzcn4qvwo7jp4e6jgwvdmvea3gnr7v5n4yp37uc6y.py
# Topologically Sorted Source Nodes: [o3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# o3 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (3, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, o1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, o2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [o3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [o3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_7, 49152, grid=grid(49152), stream=stream0)
del primals_7
return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 9, 9), (243, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((3, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class SRB(nn.Module):
def __init__(self):
super(SRB, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 9, padding=4)
self.conv2 = nn.Conv2d(64, 32, 5, padding=2)
self.conv3 = nn.Conv2d(32, 3, 5, padding=2)
self.act = nn.ReLU(True)
def forward(self, x):
o1 = self.act(self.conv1(x))
o2 = self.act(self.conv2(o1))
o3 = self.conv3(o2)
return o3
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
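A quick shape check (our sketch; names are illustrative): the paddings
(4, 2, 2) exactly offset the 9x9 and 5x5 kernels, so the block preserves the
input's spatial size and returns to 3 channels.
import torch
model = SRB()
x = torch.rand(4, 3, 64, 64)
y = model(x)
assert y.shape == (4, 3, 64, 64)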
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
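# Each kernel below fuses a convolution's bias add into one in-place pass over
# the conv output (in_out_ptr0); the first two also clamp at zero to realize
# the ReLU, while the last adds the bias only.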
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (3, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(524288)](buf3, primals_5,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(49152)](buf5, primals_7, 49152,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3
class SRBNew(nn.Module):
def __init__(self):
super(SRBNew, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 9, padding=4)
self.conv2 = nn.Conv2d(64, 32, 5, padding=2)
self.conv3 = nn.Conv2d(32, 3, 5, padding=2)
self.act = nn.ReLU(True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
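A hedged equivalence check (our addition; assumes CUDA and that the eager SRB
class is also in scope): after copying weights, the compiled module should
match the eager one up to convolution-level numerical noise.
import torch
eager = SRB().cuda()
compiled = SRBNew().cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 3, 64, 64, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-4)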
| JiahangGu/RFN | SRB | false | 17,480 | [
"MIT"
] | 4 | 8f7b33e22bb0a9f4057476720e05cc694a46ec00 | https://github.com/JiahangGu/RFN/tree/8f7b33e22bb0a9f4057476720e05cc694a46ec00 |
SigmoidFocalClassificationLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/3m/c3mhxseecpkqbqvdyzfib5liiej2nz7jgrbqydwfjbhv3nwjb7th.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, alpha_weight, pred_sigmoid, sub_1, mul_2, sub_2, mul_3, pt, pow_1, focal_weight, clamp, mul_5, sub_3, abs_1, neg, exp, log1p, loss, loss_1, mul_7], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.clamp, aten.sub, aten.abs, aten.neg, aten.exp, aten.log1p]
# Source node to ATen node mapping:
# abs_1 => abs_1
# alpha_weight => add
# clamp => clamp_min
# exp => exp
# focal_weight => mul_4
# log1p => log1p
# loss => add_2
# loss_1 => mul_6
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_5 => mul_5
# mul_7 => mul_7
# neg => neg
# pow_1 => pow_1
# pred_sigmoid => sigmoid
# pt => add_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 0.75), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %sigmoid), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 2.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %pow_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %mul_5), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_3, %log1p), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %add_2), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %arg2_1), kwargs = {})
triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr1 + (x0), xmask)
tmp26 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = 0.75
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp3 - tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp4 * tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp7 * tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp8, tmp16)
tmp18 = tmp8 * tmp0
tmp19 = tmp17 - tmp18
tmp20 = tl_math.abs(tmp8)
tmp21 = -tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = libdevice.log1p(tmp22)
tmp24 = tmp19 + tmp23
tmp25 = tmp15 * tmp24
tmp27 = tmp25 * tmp26
tl.store(out_ptr0 + (x0), tmp27, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub, mul_1, alpha_weight, pred_sigmoid, sub_1, mul_2, sub_2, mul_3, pt, pow_1, focal_weight, clamp, mul_5, sub_3, abs_1, neg, exp, log1p, loss, loss_1, mul_7], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.clamp, aten.sub, aten.abs, aten.neg, aten.exp, aten.log1p]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0.run(arg1_1, arg0_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: 'float'=2.0, alpha: 'float'=0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(SigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: 'torch.Tensor', target:
'torch.Tensor'):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
        loss = torch.clamp(input, min=0) - input * target + torch.log1p(
            torch.exp(-torch.abs(input)))
return loss
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor',
weights: 'torch.Tensor'):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
        if weights.shape.__len__() == 2 or (weights.shape.__len__() == 1
                and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
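Two small sanity checks (our sketch; all names illustrative): the hand-rolled
stable term equals F.binary_cross_entropy_with_logits, and with gamma=0,
alpha=0.5 the focal loss collapses to 0.5 * BCE.
import torch
import torch.nn.functional as F
logits = torch.randn(4, 4, 4)
targets = torch.rand(4, 4, 4)
weights = torch.ones(4, 4, 4)
bce = SigmoidFocalClassificationLoss.sigmoid_cross_entropy_with_logits(
    logits, targets)
ref = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
assert torch.allclose(bce, ref, atol=1e-6)
loss = SigmoidFocalClassificationLoss(gamma=0.0, alpha=0.5)(
    logits, targets, weights)
assert torch.allclose(loss, 0.5 * bce, atol=1e-6)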
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0(
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp26 = tl.load(in_ptr2 + x0, xmask)
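    # Fused focal loss: in_ptr0 is the target, in_ptr1 the logits, in_ptr2 the
    # weights. tmp7 is the alpha weighting, tmp14 = pt**2 (gamma == 2), and
    # tmp17..tmp24 form the stable BCE max(x, 0) - x*z + log1p(exp(-|x|)).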
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = 0.75
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp3 - tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp4 * tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp7 * tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp8, tmp16)
tmp18 = tmp8 * tmp0
tmp19 = tmp17 - tmp18
tmp20 = tl_math.abs(tmp8)
tmp21 = -tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = libdevice.log1p(tmp22)
tmp24 = tmp19 + tmp23
tmp25 = tmp15 * tmp24
tmp27 = tmp25 * tmp26
tl.store(out_ptr0 + x0, tmp27, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0[
grid(256)](arg1_1, arg0_1, arg2_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class SigmoidFocalClassificationLossNew(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: 'float'=2.0, alpha: 'float'=0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(SigmoidFocalClassificationLossNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: 'torch.Tensor', target:
'torch.Tensor'):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
        loss = torch.clamp(input, min=0) - input * target + torch.log1p(
            torch.exp(-torch.abs(input)))
return loss
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| Jiaolong/trajectory-prediction | SigmoidFocalClassificationLoss | false | 17,481 | [
"Apache-2.0"
] | 6 | 3fd4e6253b44dfdc86e7c08e93c002baf66f2e46 | https://github.com/Jiaolong/trajectory-prediction/tree/3fd4e6253b44dfdc86e7c08e93c002baf66f2e46 |